diff --git a/auto_store_non_realtime_SCADA_data.py b/auto_store_non_realtime_SCADA_data.py new file mode 100644 index 0000000..15ff023 --- /dev/null +++ b/auto_store_non_realtime_SCADA_data.py @@ -0,0 +1,76 @@ +import influxdb_api +import globals +from datetime import datetime, timedelta, timezone +import schedule +import time +from influxdb_client import InfluxDBClient, BucketsApi, WriteApi, OrganizationsApi, Point, QueryApi + + +# 2025/02/06 +def get_next_period_time() -> str: + """ + 获取下一个6小时时间点,返回格式为字符串'YYYY-MM-DDTHH:00:00+08:00' + :return: 返回字符串格式的时间,表示下一个6小时执行时间点 + """ + # 获取当前时间,并设定为北京时间 + now = datetime.now() # now 类型为 datetime,表示当前本地时间 + # 获取当前的小时数并计算下一个6小时时间点 + next_period_hour = (now.hour // 6 + 1) * 6 # next_period_hour 类型为 int,表示下一个6小时时间点的小时部分 + # 如果计算的小时大于23,表示进入第二天,调整为00:00 + if next_period_hour >= 24: + next_period_hour = 0 + now = now + timedelta(days=1) # 如果超过24小时,日期增加1天 + # 将秒和微秒部分清除,构建出下一个6小时点的datetime对象 + next_period_time = now.replace(hour=next_period_hour, minute=0, second=0, microsecond=0) + return next_period_time.strftime('%Y-%m-%dT%H:%M:%S+08:00') # 格式化为指定的字符串格式并返回 + + +# 2025/02/06 +def store_non_realtime_SCADA_data_job() -> None: + """ + 定义的任务2,每6小时执行一次,在0点、6点、12点、18点执行,执行时,更新get_history_data_end_time并调用store_non_realtime_SCADA_data_to_influxdb函数 + :return: None + """ + # 获取当前时间 + current_time = datetime.now() + # 只在0点、6点、12点、18点执行任务 + if current_time.hour % 6 == 0 and current_time.minute == 0: + # 获取下一个6小时的时间点,并更新get_history_data_end_time + get_history_data_end_time: str = get_next_period_time() # get_history_data_end_time 类型为 str,格式为'2025-02-06T12:00:00+08:00' + # 调用函数执行任务 + influxdb_api.store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time) + print('{} -- Successfully store non realtime SCADA data.'.format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + + +# 2025/02/06 +def store_non_realtime_SCADA_data_task() -> None: + """ + 定时执行6小时的任务,使用schedule库每分钟执行一次store_non_realtime_SCADA_data_job函数。 + 
该任务会一直运行,定期调用store_non_realtime_SCADA_data_job获取SCADA数据。 + :return: + """ + try: + # 每分钟检查一次,执行store_non_realtime_SCADA_data_job + schedule.every(1).minute.do(store_non_realtime_SCADA_data_job) + + # 持续执行任务,检查是否有待执行的任务 + while True: + schedule.run_pending() # 执行所有待处理的定时任务 + time.sleep(1) # 暂停1秒,避免过于频繁的任务检查 + pass + except Exception as e: + print(f"Error occurred in store_non_realtime_SCADA_data_task: {e}") + + +if __name__ == "__main__": + url = "http://localhost:8086" # 替换为你的InfluxDB实例地址 + token = "Z4UZj9HuLwLlwoApywvT2nGVP3bwLy18y-sJQ7enzZlJd8YMzMWbBA6F-q4gBiZ-7-IqdxR5aR9LvicKiSNmnA==" # 替换为你的InfluxDB Token + org_name = "beibei" # 替换为你的Organization名称 + + client = InfluxDBClient(url=url, token=token) + + # step2: 先查询pg数据库中scada_info的信息,然后存储SCADA数据到SCADA_data这个bucket里 + influxdb_api.query_pg_scada_info_non_realtime('bb') + # 自动执行 + store_non_realtime_SCADA_data_task() diff --git a/auto_store_realtime_SCADA_data.py b/auto_store_realtime_SCADA_data.py new file mode 100644 index 0000000..1f19fff --- /dev/null +++ b/auto_store_realtime_SCADA_data.py @@ -0,0 +1,61 @@ +import influxdb_api +import globals +from datetime import datetime, timedelta, timezone +import schedule +import time +from influxdb_client import InfluxDBClient, BucketsApi, WriteApi, OrganizationsApi, Point, QueryApi + + +# 2025/02/01 +def get_next_time() -> str: + """ + 获取下一个1分钟时间点,返回格式为字符串'YYYY-MM-DDTHH:MM:00+08:00' + :return: 返回字符串格式的时间,表示下一个1分钟的时间点 + """ + # 获取当前时间,并设定为北京时间 + now = datetime.now() # now 类型为 datetime,表示当前本地时间 + # 获取当前的分钟,并且将秒和微秒置为零 + current_time = now.replace(second=0, microsecond=0) # current_time 类型为 datetime,时间的秒和微秒部分被清除 + return current_time.strftime('%Y-%m-%dT%H:%M:%S+08:00') + + +# 2025/02/06 +def store_realtime_SCADA_data_job() -> None: + """ + 定义的任务1,每分钟执行1次,每次执行时,更新get_real_value_time并调用store_realtime_SCADA_data_to_influxdb函数 + :return: None + """ + # 获取当前时间并更新get_real_value_time,转换为字符串格式 + get_real_value_time: str = get_next_time() # get_real_value_time 类型为 
str,格式为'2025-02-01T18:45:00+08:00' + # 调用函数执行任务 + influxdb_api.store_realtime_SCADA_data_to_influxdb(get_real_value_time) + print('{} -- Successfully store realtime SCADA data.'.format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + + +# 2025/02/06 +def store_realtime_SCADA_data_task() -> None: + """ + 定时执行任务1和,使用schedule库每1分钟执行一次store_realtime_SCADA_data_job函数。 + 该任务会一直运行,定期调用store_realtime_SCADA_data_job获取SCADA数据。 + :return: + """ + # 每1分钟执行一次store_realtime_SCADA_data_job + schedule.every(1).minute.do(store_realtime_SCADA_data_job) + # 持续执行任务,检查是否有待执行的任务 + while True: + schedule.run_pending() # 执行所有待处理的定时任务 + time.sleep(1) # 暂停1秒,避免过于频繁的任务检查 + + +if __name__ == "__main__": + url = "http://localhost:8086" # 替换为你的InfluxDB实例地址 + token = "Z4UZj9HuLwLlwoApywvT2nGVP3bwLy18y-sJQ7enzZlJd8YMzMWbBA6F-q4gBiZ-7-IqdxR5aR9LvicKiSNmnA==" # 替换为你的InfluxDB Token + org_name = "beibei" # 替换为你的Organization名称 + + client = InfluxDBClient(url=url, token=token) + + # step2: 先查询pg数据库中scada_info的信息,然后存储SCADA数据到SCADA_data这个bucket里 + influxdb_api.query_pg_scada_info_realtime('bb') + # 自动执行 + store_realtime_SCADA_data_task() diff --git a/globals.py b/globals.py new file mode 100644 index 0000000..56bcd82 --- /dev/null +++ b/globals.py @@ -0,0 +1,54 @@ +# simulation.py中的全局变量 +# reservoir basic height +RESERVOIR_BASIC_HEIGHT = float(250.35) + +# 实时数据类:element_id和api_query_id对应 +reservoirs_id = {} +tanks_id = {} +fixed_pumps_id ={} +variable_pumps_id = {} +pressure_id = {} +demand_id = {} +quality_id = {} + +# 实时数据类:pattern_id和api_query_id对应 +source_outflow_pattern_id = {} +realtime_pipe_flow_pattern_id = {} +pipe_flow_region_patterns = {} # 根据realtime的pipe_flow,对non_realtime的demand进行分区 + +# 分区查询 +source_outflow_region = {} # 以绑定的管段作为value +source_outflow_region_id = {} # 以api_query_id作为value +source_outflow_region_patterns = {} # 以associated_pattern作为value +# 非实时数据的pattern +non_realtime_region_patterns = {} # 基于source_outflow_region进行区分 + +realtime_region_pipe_flow_and_demand_id = {} # 
基于source_outflow_region搜索该分区中的实时pipe_flow和demand的api_query_id,后续用region的流量 - 实时流量计的流量 +realtime_region_pipe_flow_and_demand_patterns = {} # 基于source_outflow_region搜索该分区中的实时pipe_flow和demand的associated_pattern,后续用region的流量 - 实时流量计的流量 + +# --------------------------------------------------------- +# influxdb_api.py中的全局变量 +# 全局变量,用于存储不同类型的realtime api_query_id +reservoir_liquid_level_realtime_ids = [] +tank_liquid_level_realtime_ids = [] +fixed_pump_realtime_ids = [] +variable_pump_realtime_ids = [] +source_outflow_realtime_ids = [] +pipe_flow_realtime_ids = [] +pressure_realtime_ids = [] +demand_realtime_ids = [] +quality_realtime_ids = [] + +# transmission_frequency的最大值 +transmission_frequency = None +hydraulic_timestep = None + +reservoir_liquid_level_non_realtime_ids = [] +tank_liquid_level_non_realtime_ids = [] +fixed_pump_non_realtime_ids = [] +variable_pump_non_realtime_ids = [] +source_outflow_non_realtime_ids = [] +pipe_flow_non_realtime_ids = [] +pressure_non_realtime_ids = [] +demand_non_realtime_ids = [] +quality_non_realtime_ids = [] \ No newline at end of file diff --git a/influxdb_api.py b/influxdb_api.py index 5f1ffa5..0be8ea3 100644 --- a/influxdb_api.py +++ b/influxdb_api.py @@ -1,16 +1,248 @@ from influxdb_client import InfluxDBClient, BucketsApi, WriteApi, OrganizationsApi, Point, QueryApi from typing import List, Dict from datetime import datetime, timedelta, timezone +from influxdb_client.client.write_api import SYNCHRONOUS from dateutil import parser +import get_realValue +import get_data +import psycopg +import time +import simulation +from tjnetwork import * +import schedule +import threading +import globals # influxdb数据库连接信息 url = "http://localhost:8086" # 替换为你的InfluxDB实例地址 -token = "xGDM5RZqRJAuzAGS-otXUdC2NFdY75qJAjRLqAB4p5WcIIAlIUpOpT8_yA16AOHmJWerwQ_08gwb84sy42jnZQ==" # 替换为你的InfluxDB Token -org_name = "TJWATERORG" # 替换为你的Organization名称 +token = "Z4UZj9HuLwLlwoApywvT2nGVP3bwLy18y-sJQ7enzZlJd8YMzMWbBA6F-q4gBiZ-7-IqdxR5aR9LvicKiSNmnA==" # 
替换为你的InfluxDB Token +org_name = "beibei" # 替换为你的Organization名称 client = InfluxDBClient(url=url, token=token, org=org_name) +# # 所有实时更新数据的SCADA设备的ID +# flow_device_ids = ['2498', '3854', '3853'] +# pressure_device_ids = ['2510', '2514'] +# reservoir_liquid_level_ids = ['2497', '2571'] +# tank_liquid_level_ids = ['4780', '9774'] +# pump_device_ids = ['2747', '2776', '2730', '2787', '2500', '2502', '2504'] +# +# # 用于更改数据的SCADA设的ID +# change_data_device_ids = ['2498', '3854', '3853', '2497', '2571', '4780', '9774', +# '2747', '2776', '2730', '2787', '2500', '2502', '2504'] +# # 全局变量,用于存储不同类型的realtime api_query_id +# reservoir_liquid_level_realtime_ids = [] +# tank_liquid_level_realtime_ids = [] +# fixed_pump_realtime_ids = [] +# variable_pump_realtime_ids = [] +# source_outflow_realtime_ids = [] +# pipe_flow_realtime_ids = [] +# pressure_realtime_ids = [] +# demand_realtime_ids = [] +# quality_realtime_ids = [] +# +# # transmission_frequency的最大值 +# transmission_frequency = None +# hydraulic_timestep = None +# +# reservoir_liquid_level_non_realtime_ids = [] +# tank_liquid_level_non_realtime_ids = [] +# fixed_pump_non_realtime_ids = [] +# variable_pump_non_realtime_ids = [] +# source_outflow_non_realtime_ids = [] +# pipe_flow_non_realtime_ids = [] +# pressure_non_realtime_ids = [] +# demand_non_realtime_ids = [] +# quality_non_realtime_ids = [] + + +def query_pg_scada_info_realtime(name: str) -> None: + """ + 查询pg数据库中,scada_info中,属于realtime的数据 + :param name: 数据库名称 + :return: + """ + # 连接数据库 + conn_string = f"dbname={name} host=127.0.0.1" + try: + with psycopg.connect(conn_string) as conn: + with conn.cursor() as cur: + # 查询 transmission_mode 为 'realtime' 的记录 + cur.execute(""" + SELECT type, api_query_id + FROM scada_info + WHERE transmission_mode = 'realtime'; + """) + records = cur.fetchall() + + # 清空全局列表 + globals.reservoir_liquid_level_realtime_ids.clear() + globals.tank_liquid_level_realtime_ids.clear() + globals.fixed_pump_realtime_ids.clear() + 
globals.variable_pump_realtime_ids.clear() + globals.source_outflow_realtime_ids.clear() + globals.pipe_flow_realtime_ids.clear() + globals.pressure_realtime_ids.clear() + globals.demand_realtime_ids.clear() + globals.quality_realtime_ids.clear() + + # 根据 type 分类存储 api_query_id + for record in records: + record_type, api_query_id = record + if api_query_id is not None: # 确保 api_query_id 不为空 + if record_type == "reservoir_liquid_level": + globals.reservoir_liquid_level_realtime_ids.append(api_query_id) + elif record_type == "tank_liquid_level": + globals.tank_liquid_level_realtime_ids.append(api_query_id) + elif record_type == "fixed_pump": + globals.fixed_pump_realtime_ids.append(api_query_id) + elif record_type == "variable_pump": + globals.variable_pump_realtime_ids.append(api_query_id) + elif record_type == "source_outflow": + globals.source_outflow_realtime_ids.append(api_query_id) + elif record_type == "pipe_flow": + globals.pipe_flow_realtime_ids.append(api_query_id) + elif record_type == "pressure": + globals.pressure_realtime_ids.append(api_query_id) + elif record_type == "demand": + globals.demand_realtime_ids.append(api_query_id) + elif record_type == "quality": + globals.quality_realtime_ids.append(api_query_id) + + # 打印结果,方便调试 + # print("Query completed. 
Results:") + # print("Reservoir Liquid Level IDs:", globals.reservoir_liquid_level_realtime_ids) + # print("Tank Liquid Level IDs:", globals.tank_liquid_level_realtime_ids) + # print("Fixed Pump IDs:", globals.fixed_pump_realtime_ids) + # print("Variable Pump IDs:", globals.variable_pump_realtime_ids) + # print("Source Outflow IDs:", globals.source_outflow_realtime_ids) + # print("Pipe Flow IDs:", globals.pipe_flow_realtime_ids) + # print("Pressure IDs:", globals.pressure_realtime_ids) + # print("Demand IDs:", globals.demand_realtime_ids) + # print("Quality IDs:", globals.quality_realtime_ids) + + except Exception as e: + print(f"查询时发生错误:{e}") + + +def query_pg_scada_info_non_realtime(name: str) -> None: + """ + 查询pg数据库中,scada_info中,属于non_realtime的数据,以及这些数据transmission_frequency的最大值 + :param name: 数据库名称 + :return: + """ + # 重新打开数据库 + if is_project_open(name): + close_project(name) + open_project(name) + dic_time = get_time(name) + globals.hydraulic_timestep = dic_time['HYDRAULIC TIMESTEP'] + close_project(name) + # 连接数据库 + conn_string = f"dbname={name} host=127.0.0.1" + try: + with psycopg.connect(conn_string) as conn: + with conn.cursor() as cur: + # 查询 transmission_mode 为 'non_realtime' 的记录 + cur.execute(""" + SELECT type, api_query_id, transmission_frequency + FROM scada_info + WHERE transmission_mode = 'non_realtime'; + """) + records = cur.fetchall() + + # 清空全局列表 + globals.reservoir_liquid_level_non_realtime_ids.clear() + globals.fixed_pump_non_realtime_ids.clear() + globals.variable_pump_non_realtime_ids.clear() + globals.source_outflow_non_realtime_ids.clear() + globals.pipe_flow_non_realtime_ids.clear() + globals.pressure_non_realtime_ids.clear() + globals.demand_non_realtime_ids.clear() + globals.quality_non_realtime_ids.clear() + + # 用于计算 transmission_frequency 最大值 + transmission_frequencies = [] + + # 根据 type 分类存储 api_query_id + for record in records: + record_type, api_query_id, freq = record + if api_query_id is not None: # 确保 api_query_id 不为空 + if 
record_type == "reservoir_liquid_level": + globals.reservoir_liquid_level_non_realtime_ids.append(api_query_id) + elif record_type == "fixed_pump": + globals.fixed_pump_non_realtime_ids.append(api_query_id) + elif record_type == "variable_pump": + globals.variable_pump_non_realtime_ids.append(api_query_id) + elif record_type == "source_outflow": + globals.source_outflow_non_realtime_ids.append(api_query_id) + elif record_type == "pipe_flow": + globals.pipe_flow_non_realtime_ids.append(api_query_id) + elif record_type == "pressure": + globals.pressure_non_realtime_ids.append(api_query_id) + elif record_type == "demand": + globals.demand_non_realtime_ids.append(api_query_id) + elif record_type == "quality": + globals.quality_non_realtime_ids.append(api_query_id) + + # 收集 transmission_frequency,用于计算最大值 + if freq is not None: + transmission_frequencies.append(freq) + + # 计算 transmission_frequency 最大值 + globals.transmission_frequency = max(transmission_frequencies) if transmission_frequencies else None + + # 打印结果,方便调试 + # print("Query completed. 
Results:") + # print("Reservoir Liquid Level Non-Realtime IDs:", globals.reservoir_liquid_level_non_realtime_ids) + # print("Fixed Pump Non-Realtime IDs:", globals.fixed_pump_non_realtime_ids) + # print("Variable Pump Non-Realtime IDs:", globals.variable_pump_non_realtime_ids) + # print("Source Outflow Non-Realtime IDs:", globals.source_outflow_non_realtime_ids) + # print("Pipe Flow Non-Realtime IDs:", globals.pipe_flow_non_realtime_ids) + # print("Pressure Non-Realtime IDs:", globals.pressure_non_realtime_ids) + # print("Demand Non-Realtime IDs:", globals.demand_non_realtime_ids) + # print("Quality Non-Realtime IDs:", globals.quality_non_realtime_ids) + # print("Maximum Transmission Frequency:", globals.transmission_frequency) + # print("Hydraulic Timestep:", globals.hydraulic_timestep) + + + except Exception as e: + print(f"查询时发生错误:{e}") + + +# 2025/02/01 +def delete_buckets(client: InfluxDBClient, org_name: str) -> None: + """ + 删除InfluxDB中指定organization下的所有buckets。 + :param client: (InfluxDBClient): 已初始化的 InfluxDBClient 实例。 + :param org_name: InfluxDB中organization的名称。 + :return: None + """ + buckets_api = client.buckets_api() + buckets_obj = buckets_api.find_buckets(org=org_name) + + # 确保 buckets_obj 拥有 buckets 属性 + if hasattr(buckets_obj, 'buckets'): + for bucket in buckets_obj.buckets: + try: + buckets_api.delete_bucket(bucket) + print(f"Bucket {bucket.name} has been deleted successfully.") + except Exception as e: + print(f"Failed to delete bucket {bucket.name}: {e}") + else: + print("未找到 buckets 属性,无法迭代 buckets。") + + +# 2025/02/01 def create_and_initialize_buckets(client: InfluxDBClient, org_name: str) -> None: + """ + 初始化influxdb的三个数据存储库,分别为SCADA_data、realtime_simulation_result、scheme_simulation_result + :param client: (InfluxDBClient): 已初始化的 InfluxDBClient 实例。 + :param org_name: InfluxDB中organization的名称 + :return: + """ + # 先删除原有的,然后再进行初始化 + delete_buckets(client, org_name) bucket_api = BucketsApi(client) write_api = client.write_api() @@ -26,8 +258,8 @@ 
def create_and_initialize_buckets(client: InfluxDBClient, org_name: str) -> None # 定义 Buckets 信息 buckets = [ {"name": "SCADA_data", "retention_rules": []}, - {"name": "realtime_data", "retention_rules": []}, - {"name": "scheme_simulation", "retention_rules": []} + {"name": "realtime_simulation_result", "retention_rules": []}, + {"name": "scheme_simulation_result", "retention_rules": []} ] # 创建 Buckets 并初始化数据 @@ -44,7 +276,7 @@ def create_and_initialize_buckets(client: InfluxDBClient, org_name: str) -> None if bucket["name"] == "SCADA_data": point = Point("SCADA") \ .tag("date", None) \ - .tag("type", None) \ + .tag("description", None) \ .tag("device_ID", None) \ .field("monitored_value", 0.0) \ .field("datacleaning_value", 0.0) \ @@ -53,7 +285,7 @@ def create_and_initialize_buckets(client: InfluxDBClient, org_name: str) -> None write_api.write(bucket="SCADA_data", org=org_name, record=point) print("Initialized SCADA_data with default structure.") - elif bucket["name"] == "realtime_data": + elif bucket["name"] == "realtime_simulation_result": # realtime_simulation_result link_point = Point("link") \ .tag("date", None) \ .tag("ID", None) \ @@ -77,11 +309,11 @@ def create_and_initialize_buckets(client: InfluxDBClient, org_name: str) -> None .field("quality", 0.0) \ .time("2024-11-21T00:00:00Z") - write_api.write(bucket="realtime_data", org=org_name, record=link_point) - write_api.write(bucket="realtime_data", org=org_name, record=node_point) - print("Initialized realtime_data with default structure.") + write_api.write(bucket="realtime_simulation_result", org=org_name, record=link_point) + write_api.write(bucket="realtime_simulation_result", org=org_name, record=node_point) + print("Initialized realtime_simulation_result with default structure.") - elif bucket["name"] == "scheme_simulation": + elif bucket["name"] == "scheme_simulation_result": link_point = Point("link") \ .tag("date", None) \ .tag("ID", None) \ @@ -109,31 +341,688 @@ def 
create_and_initialize_buckets(client: InfluxDBClient, org_name: str) -> None .field("quality", 0.0) \ .time("2024-11-21T00:00:00Z") - write_api.write(bucket="scheme_simulation", org=org_name, record=link_point) - write_api.write(bucket="scheme_simulation", org=org_name, record=node_point) - print("Initialized scheme_simulation with default structure.") + write_api.write(bucket="scheme_simulation_result", org=org_name, record=link_point) + write_api.write(bucket="scheme_simulation_result", org=org_name, record=node_point) + print("Initialized scheme_simulation_result with default structure.") print("All buckets created and initialized successfully.") -def store_realtime_data_to_influxdb(node_result_list: List[Dict[str, any]], link_result_list: List[Dict[str, any]], - result_start_time: str, - bucket: str = "realtime_data", client: InfluxDBClient = client): +def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str = "SCADA_data", client: InfluxDBClient = client) -> None: """ - 将实时数据存储到 InfluxDB 的realtime_data这个bucket中。 + 将SCADA数据通过数据接口导入数据库 + :param get_real_value_time: 获取数据的时间,格式如'2024-11-25T09:00:00+08:00' + :param bucket: (str): InfluxDB 的 bucket 名称,默认值为 "SCADA_data"。 + :param client: (InfluxDBClient): 已初始化的 InfluxDBClient 实例。 + :return: + """ + if client.ping(): + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + else: + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + write_api = client.write_api(write_options=SYNCHRONOUS) + try_count = 0 + reservoir_liquid_level_realtime_data_list = [] + tank_liquid_level_realtime_data_list = [] + fixed_pump_realtime_data_list =[] + variable_pump_realtime_data_list =[] + source_outflow_realtime_data_list = [] + pipe_flow_realtime_data_list = [] + pressure_realtime_data_list =[] + demand_realtime_data_list = [] + quality_realtime_data_list = [] + while try_count <= 5: # 尝试6次 ******* + try: + 
try_count += 1 + if globals.reservoir_liquid_level_realtime_ids: + reservoir_liquid_level_realtime_data_list = get_realValue.get_realValue( + ids=','.join(globals.reservoir_liquid_level_realtime_ids)) + if globals.tank_liquid_level_realtime_ids: + tank_liquid_level_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.tank_liquid_level_realtime_ids)) + if globals.fixed_pump_realtime_ids: + fixed_pump_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.fixed_pump_realtime_ids)) + if globals.variable_pump_realtime_ids: + variable_pump_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.variable_pump_realtime_ids)) + if globals.source_outflow_realtime_ids: + source_outflow_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.source_outflow_realtime_ids)) + if globals.pipe_flow_realtime_ids: + pipe_flow_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.pipe_flow_realtime_ids)) + if globals.pressure_realtime_ids: + pressure_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.pressure_realtime_ids)) + if globals.demand_realtime_ids: + demand_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.demand_realtime_ids)) + if globals.quality_realtime_ids: + quality_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.quality_realtime_ids)) + except Exception as e: + print(e) + time.sleep(10) + else: + try_count = 100 + + # 写入数据 + if reservoir_liquid_level_realtime_data_list: + for data in reservoir_liquid_level_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + 
if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('reservoir_liquid_level_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if tank_liquid_level_realtime_data_list: + for data in tank_liquid_level_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('tank_liquid_level_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", (monitored_value)) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if fixed_pump_realtime_data_list: + for data in fixed_pump_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = 
get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('fixed_pump_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if variable_pump_realtime_data_list: + for data in variable_pump_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('variable_pump_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if source_outflow_realtime_data_list: + for data in source_outflow_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + 
get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('source_outflow_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if pipe_flow_realtime_data_list: + for data in pipe_flow_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('pipe_flow_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if pressure_realtime_data_list: + for data in pressure_realtime_data_list: + # 
将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('pressure_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if demand_realtime_data_list: + for data in demand_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('demand_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, 
record=point) + + if quality_realtime_data_list: + for data in quality_realtime_data_list: + # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 + data_time = datetime.fromisoformat(data['time']) + get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + + # 将获取的时间转换为 UTC 时间 + get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) + + # 计算时间差(绝对值) + time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) + + # 判断时间差是否超过1分钟 + if time_difference > 60: # 超过1分钟 + monitored_value = None + else: # 小于等于3分钟 + monitored_value = data['monitored_value'] + + # 创建Point对象 + point = ( + Point('quality_realtime') + .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", monitored_value) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(get_real_value_time_utc) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + +def convert_time_format(original_time: str) -> str: + """ + 格式转换,将“2024-04-13T08:00:00+08:00"转为“2024-04-13 08:00:00” + :param original_time: str, “2024-04-13T08:00:00+08:00"格式的时间 + :return: str,“2024-04-13 08:00:00”格式的时间 + """ + new_time = original_time.replace('T', ' ') + new_time = new_time.replace('+08:00', '') + return new_time + + +# 筛选符合条件的数据 +def is_timestep_multiple(data_time, timestep): + # 获取时间点距离当天0点的时间差 + midnight = data_time.replace(hour=0, minute=0, second=0, microsecond=0) + delta_since_midnight = data_time - midnight + # 检查时间差是否为时间步长的整数倍 + return delta_since_midnight.total_seconds() % timestep.total_seconds() == 0 + +# 2025/01/10 +def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bucket: str = "SCADA_data", client: InfluxDBClient = client) -> None: + """ + 获取某段时间内传回的scada数据 + :param get_history_data_end_time: 获取历史数据的终止时间时间,格式如'2024-11-25T09:00:00+08:00' + :param bucket: (str): 
InfluxDB 的 bucket 名称,默认值为 "SCADA_data"。 + :param client: (InfluxDBClient): 已初始化的 InfluxDBClient 实例。 + :return: + """ + if client.ping(): + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + else: + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + write_api = client.write_api(write_options=SYNCHRONOUS) + + # 将end_date字符串转换为datetime对象 + end_date_dt = datetime.strptime(convert_time_format(get_history_data_end_time), '%Y-%m-%d %H:%M:%S') + end_date = end_date_dt.strftime('%Y-%m-%d %H:%M:%S') + # 将transmission_frequency字符串转换为timedelta对象 + transmission_frequency_dt = datetime.strptime(globals.transmission_frequency, '%H:%M:%S') - datetime(1900, 1, 1) + get_history_data_start_time = end_date_dt - transmission_frequency_dt + begin_date = get_history_data_start_time.strftime('%Y-%m-%d %H:%M:%S') + + reservoir_liquid_level_non_realtime_data_list = [] + tank_liquid_level_non_realtime_data_list = [] + fixed_pump_non_realtime_data_list = [] + variable_pump_non_realtime_data_list = [] + source_outflow_non_realtime_data_list = [] + pipe_flow_non_realtime_data_list = [] + pressure_non_realtime_data_list = [] + demand_non_realtime_data_list = [] + quality_non_realtime_data_list = [] + + + try_count = 0 + while try_count < 5: + try: + try_count += 1 + # reservoir_liquid_level_non_realtime_data_list = get_data.get_history_data( + # ids=','.join(reservoir_liquid_level_non_realtime_ids), begin_date=begin_date, end_date=end_date, downsample='1m') + if globals.reservoir_liquid_level_non_realtime_ids: + reservoir_liquid_level_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.reservoir_liquid_level_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.tank_liquid_level_non_realtime_ids: + tank_liquid_level_non_realtime_data_list = get_data.get_history_data( + 
ids=','.join(globals.tank_liquid_level_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.fixed_pump_non_realtime_ids: + fixed_pump_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.fixed_pump_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.variable_pump_non_realtime_ids: + variable_pump_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.variable_pump_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.source_outflow_non_realtime_ids: + source_outflow_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.source_outflow_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.pipe_flow_non_realtime_ids: + pipe_flow_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.pipe_flow_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + # print(pipe_flow_non_realtime_data_list) + + if globals.pressure_non_realtime_ids: + pressure_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.pressure_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.demand_non_realtime_ids: + demand_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.demand_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + if globals.quality_non_realtime_ids: + quality_non_realtime_data_list = get_data.get_history_data( + ids=','.join(globals.quality_non_realtime_ids), + begin_date=begin_date, end_date=end_date, + downsample='1m') + + except Exception as e: + print(f"Attempt {try_count} failed with error: {e}") + if try_count < 5: + print("Retrying in 10 seconds...") + time.sleep(10) + else: + print("Max retries reached. 
Exiting.") + + else: + print("Data fetched successfully.") + break # 成功后退出循环 + + if reservoir_liquid_level_non_realtime_data_list: + for data in reservoir_liquid_level_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('reservoir_liquid_level_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if tank_liquid_level_non_realtime_data_list: + for data in tank_liquid_level_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('tank_liquid_level_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if fixed_pump_non_realtime_data_list: + for data in fixed_pump_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('fixed_pump_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if variable_pump_non_realtime_data_list: + for data in variable_pump_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('variable_pump_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", 
None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if source_outflow_non_realtime_data_list: + for data in source_outflow_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('source_outflow_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + # + if pipe_flow_non_realtime_data_list: + for data in pipe_flow_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('pipe_flow_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if pressure_non_realtime_data_list: + for data in pressure_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('pressure_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if demand_non_realtime_data_list: + for data in demand_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('demand_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + 
.field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + if quality_non_realtime_data_list: + for data in quality_non_realtime_data_list: + # 创建Point对象 + point = ( + Point('quality_non_realtime') + .tag("date", data['time'].strftime('%Y-%m-%d')) + .tag("description", data['description']) + .tag("device_ID", data['device_ID']) + .field("monitored_value", data['monitored_value']) + .field("datacleaning_value", None) + .field("simulation_value", None) + .time(data['time']) + ) + write_api.write(bucket=bucket, org=org_name, record=point) + + +def query_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], query_time: str, bucket: str="SCADA_data", client: InfluxDBClient=client) -> Dict[str, float]: + """ + 根据SCADA设备的ID和时间查询值 + :param query_ids_list: SCADA设备ID的列表 + :param query_time: 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'。 + :param bucket: InfluxDB 的 bucket 名称,默认值为 "SCADA_data"。 + :param client: 已初始化的 InfluxDBClient 实例。 + :return: + """ + if client.ping(): + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + else: + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + query_api = client.query_api() + # 将北京时间转换为 UTC 时间 + beijing_time = datetime.fromisoformat(query_time) + utc_time = beijing_time.astimezone(timezone.utc) + utc_start_time = utc_time - timedelta(seconds=1) + utc_stop_time = utc_time + timedelta(seconds=1) + + # 构建查询字典 + SCADA_result_dict = {} + + for device_id in query_ids_list: + # 构建 Flux 查询语句 + flux_query = f''' + from(bucket: "{bucket}") + |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) + |> filter(fn: (r) => r["device_ID"] == "{device_id}") + |> filter(fn: (r) => r["_field"] == "monitored_value") + ''' + + # 执行查询 + try: + result = query_api.query(flux_query) + + # 从查询结果中提取 monitored_value + if result: + # 假设返回的结果为一行数据 + for table in result: 
+ for record in table.records: + # 获取字段 "_value" 即为 monitored_value + monitored_value = record.get_value() + SCADA_result_dict[device_id] = monitored_value + else: + # 如果没有结果,默认设置为 None 或其他值 + SCADA_result_dict[device_id] = None + except Exception as e: + print(f"Error querying InfluxDB for device ID {device_id}: {e}") + SCADA_result_dict[device_id] = None + + return SCADA_result_dict + + +# 2025/02/01 +def store_realtime_simulation_result_to_influxdb(node_result_list: List[Dict[str, any]], link_result_list: List[Dict[str, any]], + result_start_time: str, + bucket: str = "realtime_simulation_result", client: InfluxDBClient = client): + """ + 将实时模拟计算结果数据存储到 InfluxDB 的realtime_simulation_result这个bucket中。 :param node_result_list: (List[Dict[str, any]]): 包含节点和结果数据的字典列表。 :param link_result_list: (List[Dict[str, any]]): 包含连接和结果数据的字典列表。 :param result_start_time: (str): 计算结果的模拟开始时间。 - :param bucket: (str): InfluxDB 的 bucket 名称,默认值为 "realtime_data"。 + :param bucket: (str): InfluxDB 的 bucket 名称,默认值为 "realtime_simulation_result"。 :param client: (InfluxDBClient): 已初始化的 InfluxDBClient 实例。 :return: """ if client.ping(): - print("Successfully connected to InfluxDB.") + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) else: - print("Failed to connect to InfluxDB.") + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) # 开始写入数据 try: @@ -185,7 +1074,8 @@ def store_realtime_data_to_influxdb(node_result_list: List[Dict[str, any]], link raise RuntimeError(f"数据写入 InfluxDB 时发生错误: {e}") -def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_data", client: InfluxDBClient=client) -> dict: +# 2025/02/01 +def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_simulation_result", client: InfluxDBClient=client) -> dict: """ 查询指定ID的最新的一条记录 :param ID: (str): 要查询的 ID。 @@ -195,9 +1085,11 @@ def query_latest_record_by_ID(ID: str, type: str, bucket: 
str="realtime_data", c :return: dict: 最新记录的数据,如果没有找到则返回 None。 """ if client.ping(): - print("Successfully connected to InfluxDB.") + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) else: - print("Failed to connect to InfluxDB.") + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) query_api = client.query_api() if type == "node": flux_query = f''' @@ -261,7 +1153,8 @@ def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_data", c return None # 如果没有找到记录 -def query_all_record_by_time(query_time: str, bucket: str="realtime_data", client: InfluxDBClient=client) -> tuple: +# 2025/02/01 +def query_all_record_by_time(query_time: str, bucket: str="realtime_simulation_result", client: InfluxDBClient=client) -> tuple: """ 查询指定北京时间的所有记录,包括 'node' 和 'link' measurement,分别以指定格式返回。 :param query_time: (str): 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'。 @@ -270,9 +1163,11 @@ def query_all_record_by_time(query_time: str, bucket: str="realtime_data", clien :return: dict: tuple: (node_records, link_records) """ if client.ping(): - print("Successfully connected to InfluxDB.") + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) else: - print("Failed to connect to InfluxDB.") + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) query_api = client.query_api() # 将北京时间转换为 UTC 时间 beijing_time = datetime.fromisoformat(query_time) @@ -330,8 +1225,9 @@ def query_all_record_by_time(query_time: str, bucket: str="realtime_data", clien return node_records, link_records -# DingZQ return list of dict { 'time': 'timevalue', 'value', 'valuevalue' } -def query_curve_by_ID_property_daterange(ID: str, type: str, property: str, start_date: str, end_date: str, bucket: str="realtime_data", client: InfluxDBClient=client) -> list: + +# 2025/02/01 +def 
query_curve_by_ID_property_daterange(ID: str, type: str, property: str, start_date: str, end_date: str, bucket: str="realtime_simulation_result", client: InfluxDBClient=client) -> list: """ 根据 type 查询对应的 measurement,根据 ID 和 date 查询对应的 tag,根据 property 查询对应的 field。 :param ID: (str): 要查询的 ID(tag) @@ -339,14 +1235,16 @@ def query_curve_by_ID_property_daterange(ID: str, type: str, property: str, star :param property: (str): 查询的字段名称(field) :param start_date: (str): 查询的开始日期,格式为 'YYYY-MM-DD' :param end_date: (str): 查询的结束日期,格式为 'YYYY-MM-DD' - :param bucket: (str): 数据存储的 bucket 名称,默认值为 "realtime_data" + :param bucket: (str): 数据存储的 bucket 名称,默认值为 "realtime_simulation_result" :param client: (InfluxDBClient): 已初始化的 InfluxDBClient 实例 :return: 查询结果的列表 """ if client.ping(): - print("Successfully connected to InfluxDB.") + print("{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) else: - print("Failed to connect to InfluxDB.") + print("{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) query_api = client.query_api() # 确定 measurement if type == "node": @@ -388,39 +1286,57 @@ def query_curve_by_ID_property_daterange(ID: str, type: str, property: str, star return results + # 示例调用 if __name__ == "__main__": url = "http://localhost:8086" # 替换为你的InfluxDB实例地址 - token = "xGDM5RZqRJAuzAGS-otXUdC2NFdY75qJAjRLqAB4p5WcIIAlIUpOpT8_yA16AOHmJWerwQ_08gwb84sy42jnZQ==" # 替换为你的InfluxDB Token - org_name = "TJWATERORG" # 替换为你的Organization名称 - client = InfluxDBClient(url=url, token=token, org=org_name) + token = "Z4UZj9HuLwLlwoApywvT2nGVP3bwLy18y-sJQ7enzZlJd8YMzMWbBA6F-q4gBiZ-7-IqdxR5aR9LvicKiSNmnA==" # 替换为你的InfluxDB Token + org_name = "beibei" # 替换为你的Organization名称 - # 检查连接状态 - try: - create_and_initialize_buckets(client, org_name) - except Exception as e: - print(f"连接失败: {e}") - finally: - client.close() + client = InfluxDBClient(url=url, token=token) - with InfluxDBClient(url=url, token=token, org=org_name) 
as client: + # step1: 检查连接状态,初始化influxdb的buckets + # try: + # # delete_buckets(client, org_name) + # create_and_initialize_buckets(client, org_name) + # except Exception as e: + # print(f"连接失败: {e}") + # finally: + # client.close() - bucket_name = "realtime_data" # 数据存储的 bucket 名称 - node_id = "ZBBDTZDP000022" # 查询的节点 ID - link_id = "ZBBGXSZW000002" - # - # # latest_record = query_latest_record_by_ID(ID=node_id, type="node", bucket=bucket_name, client=client) - # latest_record = query_latest_record_by_ID(ID=link_id, type="link", bucket=bucket_name, client=client) - # - # if latest_record: - # print("最新记录:", latest_record) - # else: - # print("未找到符合条件的记录。") + # step2: 先查询pg数据库中scada_info的信息,然后存储SCADA数据到SCADA_data这个bucket里 + # query_pg_scada_info_realtime('bb') + # query_pg_scada_info_non_realtime('bb') + # 手动执行 + # store_realtime_SCADA_data_to_influxdb(get_real_value_time='2025-02-07T16:52:00+08:00') + # store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time='2025-02-01T12:00:00+08:00') + + # step3: 查询测试示例 + # with InfluxDBClient(url=url, token=token, org=org_name) as client: + # 示例1:query_latest_record_by_ID + # bucket_name = "realtime_simulation_result" # 数据存储的 bucket 名称 + # node_id = "ZBBDTZDP000022" # 查询的节点 ID + # link_id = "ZBBGXSZW000002" + # + # latest_record = query_latest_record_by_ID(ID=node_id, type="node", bucket=bucket_name, client=client) + # # latest_record = query_latest_record_by_ID(ID=link_id, type="link", bucket=bucket_name, client=client) + # + # if latest_record: + # print("最新记录:", latest_record) + # else: + # print("未找到符合条件的记录。") + + # 示例2:query_all_record_by_time # node_records, link_records = query_all_record_by_time(query_time="2024-11-25T06:00:00+08:00") # print("Node 数据:", node_records) # print("Link 数据:", link_records) - curve_result = query_curve_by_ID_property_daterange(ID=node_id, type="node", property="head", - start_date="2024-11-25", end_date="2024-11-25") - print(curve_result) + # 
示例3:query_curve_by_ID_property_daterange + # curve_result = query_curve_by_ID_property_daterange(ID=node_id, type="node", property="head", + # start_date="2024-11-25", end_date="2024-11-25") + # print(curve_result) + + # 示例4:query_SCADA_data_by_device_ID_and_time + # SCADA_result_dict = query_SCADA_data_by_device_ID_and_time(globals.reservoir_liquid_level_realtime_ids, query_time='2024-12-13T11:30:00+08:00') + # print(SCADA_result_dict) diff --git a/main.py b/main.py index f5c91d9..9198dfe 100644 --- a/main.py +++ b/main.py @@ -7,7 +7,9 @@ from urllib.request import Request from xml.dom import minicompat from pydantic import BaseModel from starlette.responses import FileResponse, JSONResponse -from fastapi import FastAPI, File, UploadFile, Response, status, Request, Body, HTTPException +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.types import Receive +from fastapi import FastAPI, File, UploadFile, Response, status, Request, Body, HTTPException,Query from fastapi.responses import PlainTextResponse from fastapi.middleware.gzip import GZipMiddleware from tjnetwork import * @@ -15,8 +17,14 @@ import asyncio import threading import uvicorn from multiprocessing import Value +import uvicorn +from run_simulation import run_simulation, run_simulation_ex +from online_Analysis import * +import logging from fastapi.middleware.cors import CORSMiddleware import random +from datetime import datetime +import shutil import logging import redis import datetime @@ -39,6 +47,7 @@ tmpDir = "C:/tmpfiles/" lockedPrjs = {} + if not os.path.exists(inpDir): os.mkdir(inpDir) @@ -58,7 +67,11 @@ influx_org_name = "TJWATERORG" # 替换为你的Organization名称 influx_client = InfluxDBClient(url=influx_url, token=influx_token, org=influx_org_name) # 配置日志记录器 -logging.basicConfig(level=logging.INFO) +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' +) logger = logging.getLogger(__name__) # 配置 
CORS 中间件 @@ -69,11 +82,69 @@ app.add_middleware( allow_methods=["*"], # 允许所有 HTTP 方法 allow_headers=["*"], # 允许所有 HTTP 头 ) + # 定义一个共享变量 lock_simulation = Value('i', 0) app.add_middleware(GZipMiddleware, minimum_size=1000) + +async def receive_with_body(body: bytes) -> Receive: + async def receive() -> dict: + return { + "type": "http.request", + "body": body, + "more_body": False, + } + return receive + + +@app.middleware("http") +async def log_requests(request: Request, call_next): + if request.url.path == "/favicon.ico": + return Response(status_code=204) + + # 记录接收请求的时间 + request_time = time.time() + request_time_str = datetime.fromtimestamp(request_time).strftime('%Y-%m-%d %H:%M:%S') + + # 判断是否为文件上传 + is_file_upload = request.headers.get("content-type", "").startswith("multipart/form-data") + + # 记录接收的请求数据 + logging.info(f"Received request: {request.method} {request.url} at {request_time_str}") + if not is_file_upload: + request_body = await request.body() + logging.info(f"Request body: {request_body.decode('utf-8')}") + + # 创建新的 Request 对象,传递缓存的请求体 + receive = await receive_with_body(request_body) + request = Request(request.scope, receive=receive) + else: + logging.info(f"Request body: File") + + # 处理请求 + response = await call_next(request) + + # 记录发送响应的时间 + response_time = time.time() + response_time_str = datetime.fromtimestamp(response_time).strftime('%Y-%m-%d %H:%M:%S') + processing_time = response_time - request_time + + # 记录发送的响应数据 + # response_body = b"" + # async for chunk in response.body_iterator: + # response_body += chunk + + # 记录响应的状态码以及是否成功 + success_status = response.status_code < 400 + logging.info(f"Response status: {response.status_code} at {response_time_str}, success: {success_status}") + # logging.info(f"Response body: {response_body.decode('utf-8')}") + logging.info(f"Processing time: {processing_time:.3f} seconds") + + return response + + @app.on_event("startup") async def startup_db(): logger.info('\n') @@ -82,7 +153,8 @@ async def 
startup_db(): logger.info("TJWater CloudService is starting...") logger.info('**********************************************************') logger.info('\n') - + + ############################################################ # extension_data ############################################################ @@ -382,7 +454,7 @@ async def fastapi_is_pipe(network: str, link: str) -> bool: @app.get('/ispump/') async def fastapi_is_pump(network: str, link: str) -> bool: - return is_pump(network, lin) + return is_pump(network, link) @app.get('/isvalve/') async def fastapi_is_valve(network: str, link: str) -> bool: @@ -1712,7 +1784,7 @@ async def fastapi_get_scada_element_schema(network: str) -> dict[str, dict[str, @app.get('/getscadaelements/') async def fastapi_get_scada_elements(network: str) -> list[str]: - return get_all_scada_elements(network) + return get_scada_elements(network) @app.get('/getscadaelement/') async def fastapi_get_scada_element(network: str, id: str) -> dict[str, Any]: @@ -2050,8 +2122,492 @@ def generate_openapi_json(): with open(openapi_json_path, "w") as file: json.dump(app.openapi(), file, indent=4) - + + + +############################################################ +# real_time api 37 +# example: http://127.0.0.1:8000/runsimulation?network=beibeizone&start_time=2024-04-01T08:00:00Z +############################################################ +# 必须用这个PlainTextResponse,不然每个key都有引号 +# @app.get("/runsimulation/", response_class = PlainTextResponse) +# async def fastapi_run_project(network: str,start_time:str,end_time=None) -> str: +# filename = 'c:/lock.simulation' +# filename2 = 'c:/lock.simulation2' +# if os.path.exists(filename2): +# print('file exists') +# raise HTTPException(status_code=409, detail="is in simulation") +# else: +# print('file doesnt exists') +# #os.rename(filename, filename2) +# result = run_simulation(network,start_time,end_time) +# #os.rename(filename2, filename) +# return result + + 
############################################################
# real_time api 37
# example: http://127.0.0.1:8000/runsimulation?network=beibeizone&start_time=2024-04-01T08:00:00Z
############################################################


# PlainTextResponse is required here; with the default JSON response every key ends up quoted.
@app.get("/runsimulation/", response_class=PlainTextResponse)
async def fastapi_run_project(network: str, start_time: str, end_time=None) -> str:
    """Launch a realtime simulation for *network*; reply 409 when a run is already in progress."""
    lock_free = 'c:/lock.simulation'
    lock_busy = 'c:/lock.simulation2'
    # A present busy-lock file means another simulation holds the (single) slot.
    if os.path.exists(lock_busy):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(lock_free, lock_busy)
    result = run_simulation_ex(name=network, simulation_type='realtime',
                               start_datetime=start_time, end_datetime=end_time)
    #os.rename(lock_busy, lock_free)
    return result

############################################################
# real_time api 37.5
# example:
# response = requests.post("http://127.0.0.1:8000/runsimulation",
#                          data=json.dumps({'network': 'bb_server', 'simulation_type': 'extended',
#                                           'start_time': '2024-05-17T09:30:00Z', 'duration': 900,
#                                           'pump_control': {'1#': [0, 0], '2#': [1, 1], '3#': [1, 1], '4#': [1, 0],
#                                                            '5#': [45, 43], '6#': [0, 0], '7#': [0, 0]}}),
#                          headers={'accept': 'application/json', 'Content-Type': 'application/json'})
############################################################


# class RunSimuItem(BaseModel):
#     network: str
#     simulation_type: str
#     start_time: str
#     end_time: Optional[str] = None
#     duration: Optional[int] = 900
#     pump_control: Optional[dict] = None
#
#
# @app.post("/runsimulation/")
# async def fastapi_run_project(item: RunSimuItem) -> str:
#     item = item.dict()
#     filename = 'c:/lock.simulation'
#     filename2 = 'c:/lock.simulation2'
#     if os.path.exists(filename2):
#         print('file exists')
#         raise HTTPException(status_code=409, detail="is in simulation")
#     else:
#         print('file doesnt exists')
#         #os.rename(filename, filename2)
#         result = run_simulation_ex(item['network'], item['simulation_type'],
#                                    item['start_time'], item['end_time'],
#                                    item['duration'], item['pump_control'])
#         #os.rename(filename2, filename)
#         return result


############################################################
# burst analysis api 38
#example:http://127.0.0.1:8000/burst_analysis?network=beibeizone&start_time=2024-04-01T08:00:00Z&burst_ID=ZBBGXSZW000001&burst_size=200&duration=1800
############################################################

# @app.get("/burst_analysis/", response_class = PlainTextResponse)
# async def fastapi_burst_analysis(network: str,start_time:str,burst_ID:str,burst_size:float,burst_flow:float=None,duration:int=None) -> str:
#     filename = 'c:/lock.simulation'
#     filename2 = 'c:/lock.simulation2'
#     if os.path.exists(filename2):
#         print('file exists')
#         raise HTTPException(status_code=409, detail="is in simulation")
#     else:
#         print('file doesnt exists')
#         #os.rename(filename, filename2)
#         result = burst_analysis(network,start_time,burst_ID,burst_size,burst_flow,duration)
#         #os.rename(filename2, filename)
#         return result


############################################################
# burst analysis api 38.5
# example:
# response = requests.post("http://127.0.0.1:8000/burst_analysis",
#                          data=json.dumps({'network': 'bb_server',
#                                           'start_time': '2024-05-17T09:30:00Z',
#                                           'burst_ID': ['ZBBGXSZW000001'],
#                                           'burst_size': [200],
#                                           'duration': 1800,
#                                           'pump_control': {'1#': [0, 0, 0], '2#': [1, 1, 1], '3#': [1, 1, 1], '4#': [1, 1, 1],
#                                                            '5#': [45, 45, 45], '6#': [0, 0, 0], '7#': [0, 0, 0]}
#                                           'valve_closed': ['GSD2307192058576667FF7B41FF']),
#                          headers={'accept': 'application/json', 'Content-Type': 'application/json'})
############################################################


class BurstAnalysis(BaseModel):
    """Request body for POST /burst_analysis/.

    burst_ID/burst_size accept either a single value or a parallel list of values.
    """
    network: str
    start_time: str
    burst_ID: list[str] | str
    burst_size: list[float] | float
    duration: int
    pump_control: Optional[dict] = None   # per-pump schedule; None keeps the current schedule
    valve_closed: Optional[list] = None   # IDs of valves to close during the scenario


@app.post("/burst_analysis/")
async def fastapi_burst_analysis(data: BurstAnalysis) -> str:
    """Run a pipe-burst scenario simulation; reply 409 when a run is already in progress."""
    item = data.dict()
    filename = 'c:/lock.simulation'
    filename2 = 'c:/lock.simulation2'
    # A present busy-lock file means another simulation holds the (single) slot.
    if os.path.exists(filename2):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(filename, filename2)
    result = burst_analysis(prj_name=item['network'],
                            date_time=item['start_time'],
                            burst_ID=item['burst_ID'],
                            burst_size=item['burst_size'],
                            duration=item['duration'],
                            pump_control=item['pump_control'],
                            valve_closed=item['valve_closed'])
    #os.rename(filename2, filename)
    return result

############################################################
# valve close analysis api 39
#example:http://127.0.0.1:8000/valve_close_analysis?network=beibeizone&start_time=2024-04-01T08:00:00Z&valves=GSD2307192058577780A3287D78&valves=GSD2307192058572E953B707226(S2)&duration=1800
############################################################

@app.get("/valve_close_analysis/", response_class=PlainTextResponse)
async def fastapi_valve_close_analysis(network: str, start_time: str,
                                       valves: Annotated[list[str], Query()],
                                       duration: Optional[int] = None) -> str:
    """Simulate closing the given valves; reply 409 when a run is already in progress.

    Fix: *duration* was annotated ``int`` with a ``None`` default; ``Optional[int]``
    matches the actual contract (optional query parameter) without changing behavior.
    """
    filename = 'c:/lock.simulation'
    filename2 = 'c:/lock.simulation2'
    if os.path.exists(filename2):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(filename, filename2)
    result = valve_close_analysis(network, start_time, valves, duration)
    #os.rename(filename2, filename)
    return result

############################################################
# pipe flushing analysis api 40
#example:http://127.0.0.1:8000/flushing_analysis?network=beibeizone&start_time=2024-04-01T08:00:00Z&valves=GSD230719205857733F8F5214FF&valves=GSD230719205857C0AF65B6A170&valves_k=0.5&valves_k=0.5&drainage_node_ID=GSD2307192058570DEDF28E4F73&flush_flow=0&duration=1800
############################################################

@app.get("/flushing_analysis/", response_class=PlainTextResponse)
async def fastapi_flushing_analysis(network: str, start_time: str,
                                    valves: Annotated[list[str], Query()],
                                    valves_k: Annotated[list[float], Query()],
                                    drainage_node_ID: str,
                                    flush_flow: float = 0,
                                    duration: Optional[int] = None) -> str:
    """Simulate a pipe-flushing operation; reply 409 when a run is already in progress.

    Fix: *duration* was annotated ``int`` with a ``None`` default; ``Optional[int]``
    matches the actual contract (optional query parameter) without changing behavior.
    """
    filename = 'c:/lock.simulation'
    filename2 = 'c:/lock.simulation2'
    # A present busy-lock file means another simulation holds the (single) slot.
    if os.path.exists(filename2):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(filename, filename2)
    result = flushing_analysis(network, start_time, valves, valves_k,
                               drainage_node_ID, flush_flow, duration)
    #os.rename(filename2, filename)
    return result



############################################################
# contaminant_simulation api 41
#example:http://127.0.0.1:8000/contaminant_simulation?network=beibeizone&start_time=2024-04-01T08:00:00Z&source=ZBBDTZDP002677&concentration=100&duration=1800
############################################################

@app.get("/contaminant_simulation/", response_class=PlainTextResponse)
async def fastapi_contaminant_simulation(network: str, start_time: str, source: str,
                                         concentration: float, duration: int = 900,
                                         pattern: Optional[str] = None) -> str:
    """Simulate contaminant injection at *source*; reply 409 when a run is already in progress.

    Fix: *pattern* was annotated ``str`` with a ``None`` default; ``Optional[str]``
    matches the actual contract (optional query parameter) without changing behavior.
    """
    filename = 'c:/lock.simulation'
    filename2 = 'c:/lock.simulation2'
    if os.path.exists(filename2):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(filename, filename2)
    result = contaminant_simulation(network, start_time, source, concentration, duration, pattern)
    #os.rename(filename2, filename)
    return result

############################################################
# age analysis api 42
#example:http://127.0.0.1:8000/age_analysis/?network=bb&start_time=2024-04-01T00:00:00Z&end_time=2024-04-01T08:00:00Z&duration=28800
############################################################


@app.get("/age_analysis/", response_class=PlainTextResponse)
async def fastapi_age_analysis(network: str, start_time: str, end_time: str, duration: int) -> str:
    """Run a water-age analysis over [start_time, end_time]; 409 when a run is in progress."""
    lock_free = 'c:/lock.simulation'
    lock_busy = 'c:/lock.simulation2'
    # A present busy-lock file means another simulation holds the (single) slot.
    if os.path.exists(lock_busy):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(lock_free, lock_busy)
    result = age_analysis(network, start_time, end_time, duration)
    #os.rename(lock_busy, lock_free)
    return result


############################################################
# scheduling analysis api 43
############################################################


class SchedulingAnalysis(BaseModel):
    """Request body for POST /scheduling_analysis/."""
    network: str
    start_time: str
    pump_control: dict
    tank_id: str
    water_plant_output_id: str
    time_delta: Optional[int] = 300  # simulation step in seconds


@app.post("/scheduling_analysis/")
async def fastapi_scheduling_analysis(data: SchedulingAnalysis) -> str:
    """Run a pump-scheduling simulation; 409 when a run is in progress."""
    payload = data.dict()
    lock_free = 'c:/lock.simulation'
    lock_busy = 'c:/lock.simulation2'
    if os.path.exists(lock_busy):
        print('file exists')
        raise HTTPException(status_code=409, detail="is in simulation")
    print('file doesnt exists')
    #os.rename(lock_free, lock_busy)
    result = scheduling_simulation(payload['network'], payload['start_time'],
                                   payload['pump_control'], payload['tank_id'],
                                   payload['water_plant_output_id'], payload['time_delta'])
    #os.rename(lock_busy, lock_free)
    return result


############################################################
# pressure_regulating api 44
# example:
# response = requests.post("http://127.0.0.1:8000/pressure_regulating",
#                          data=json.dumps({'network': 'bb_server',
'start_time': '2024-05-17T09:30:00Z', +# 'pump_control': {'1#': [0, 0], '2#': [1, 1], '3#': [1, 1], '4#': [1, 1], +# '5#': [45, 45], '6#': [0, 0], '7#': [0, 0]} +# 'tank_init_level': {'ZBBDTJSC000002': 2, 'ZBBDTJSC000001': 2}}), +# headers={'accept': 'application/json', 'Content-Type': 'application/json'}) +############################################################ + + +class PressureRegulation(BaseModel): + network: str + start_time: str + pump_control: dict + tank_init_level: Optional[dict] = None + + +@app.post("/pressure_regulation/") +async def fastapi_pressure_regulation(data: PressureRegulation) -> str: + item = data.dict() + filename = 'c:/lock.simulation' + filename2 = 'c:/lock.simulation2' + if os.path.exists(filename2): + print('file exists') + raise HTTPException(status_code=409, detail="is in simulation") + else: + print('file doesnt exists') + #os.rename(filename, filename2) + result = pressure_regulation(prj_name=item['network'], + start_datetime=item['start_time'], + pump_control=item['pump_control'], + tank_initial_level_control=item['tank_init_level']) + #os.rename(filename2, filename) + return result + + +############################################################ +# project_management api 45 +# example: +# response = requests.post("http://127.0.0.1:8000/project_management", +# data=json.dumps({'network': 'bb_server', +# 'start_time': '2024-05-17T00:00:00Z', +# 'pump_control': +# {'1#':(list:97), '2#':(list:97), '3#':(list:97), '4#':(list:97), +# '5#':(list:97), '6#':(list:97), '7#':(list:97)} +# 'tank_init_level': {'ZBBDTJSC000002': 2, 'ZBBDTJSC000001': 2} +# 'region_demand': {'hp': 150000, 'lp': 40000}}), +# headers={'accept': 'application/json', 'Content-Type': 'application/json'}) +############################################################ + + +class ProjectManagement(BaseModel): + network: str + start_time: str + pump_control: dict + tank_init_level: Optional[dict] = None + region_demand: Optional[dict] = None + + 
+
+@app.post("/project_management/")
+async def fastapi_project_management(data: ProjectManagement) -> str:
+    # Run a 24 h project-management simulation; rejected with 409 while another
+    # simulation holds the on-disk lock marker.
+    item = data.dict()
+    filename = 'c:/lock.simulation'
+    filename2 = 'c:/lock.simulation2'
+    if os.path.exists(filename2):
+        print('file exists')
+        raise HTTPException(status_code=409, detail="is in simulation")
+    else:
+        print('file doesnt exists')
+        #os.rename(filename, filename2)
+        result = project_management(prj_name=item['network'],
+                                    start_datetime=item['start_time'],
+                                    pump_control=item['pump_control'],
+                                    tank_initial_level_control=item['tank_init_level'],
+                                    region_demand_control=item['region_demand'])
+        #os.rename(filename2, filename)
+        return result
+
+
+############################################################
+# project_management api 46
+# example:
+# with open('./inp/bb_temp.inp', 'rb') as file:
+#     response = requests.post("http://127.0.0.1:8000/network_project",
+#                              files={'file': file})
+############################################################
+
+
+@app.post("/network_project/")
+async def fastapi_network_project(file: UploadFile = File()) -> str:
+    # Save the uploaded .inp under a date-stamped name, then run it.
+    # NOTE(review): the upload is written to disk *before* the lock check,
+    # so a rejected (409) request still leaves/overwrites the file.
+    temp_file_path = './inp/'
+    if not os.path.exists(temp_file_path):
+        os.mkdir(temp_file_path)
+    temp_file_name = f'network_project_{datetime.now().strftime("%Y%m%d")}'
+    temp_file_path = f'{temp_file_path}{temp_file_name}.inp'
+
+    with open(temp_file_path, "wb") as buffer:
+        shutil.copyfileobj(file.file, buffer)
+        buffer.close()
+
+    filename = 'c:/lock.simulation'
+    filename2 = 'c:/lock.simulation2'
+    if os.path.exists(filename2):
+        print('file exists')
+        raise HTTPException(status_code=409, detail="is in simulation")
+    else:
+        print('file doesnt exists')
+        result = run_inp(temp_file_name)
+        #os.rename(filename2, filename)
+        return result
+
+
+############################################################
+# daily scheduling analysis api 47
+############################################################
+
+
+class DailySchedulingAnalysis(BaseModel):
+    # Request body for /daily_scheduling_analysis/.
+    network: str
+    start_time: str
+    pump_control: dict
+    reservoir_id: str
+    tank_id: str
+    water_plant_output_id: str
+    time_delta: Optional[int] = 300
+
+
+@app.post("/daily_scheduling_analysis/")
+async def fastapi_daily_scheduling_analysis(data: DailySchedulingAnalysis) -> str:
+    # Run a 24 h scheduling simulation; 409 while the busy marker exists.
+    data = data.dict()
+    filename = 'c:/lock.simulation'
+    filename2 = 'c:/lock.simulation2'
+    if os.path.exists(filename2):
+        print('file exists')
+        raise HTTPException(status_code=409, detail="is in simulation")
+    else:
+        print('file doesnt exists')
+        #os.rename(filename, filename2)
+        result = daily_scheduling_simulation(data['network'], data['start_time'],
+                                             data['pump_control'], data['reservoir_id'], data['tank_id'],
+                                             data['water_plant_output_id'])
+        #os.rename(filename2, filename)
+        return result
+
+
+############################################################
+# network_update api 48
+############################################################
+
+
+@app.post("/network_update/")
+async def fastapi_network_update(file: UploadFile = File()) -> str:
+    # Save the uploaded network file (date-stamped, so same-day uploads
+    # overwrite each other) and apply it to the database via network_update().
+    # 默认文件夹
+    default_folder = './'
+
+    # 使用当前时间生成临时文件名
+    temp_file_name = f'network_update_{datetime.now().strftime("%Y%m%d")}'
+    temp_file_path = os.path.join(default_folder, temp_file_name)
+
+    # 保存上传的文件到服务器
+    try:
+        with open(temp_file_path, "wb") as buffer:
+            shutil.copyfileobj(file.file, buffer)
+            buffer.close()
+        print(f"文件 {temp_file_name} 已成功保存。")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"文件保存失败: {e}")
+
+    # 更新数据库
+    try:
+        network_update(temp_file_path)
+        return json.dumps({"message": "管网更新成功"})
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"数据库操作失败: {e}")
+
+
+############################################################
+# pump failure api 49
+############################################################
+
+
+class PumpFailureState(BaseModel):
+    # Request body for /pump_failure/.
+    time: str
+    pump_status: dict
+
+
+@app.post("/pump_failure/")
+async def fastapi_pump_failure(data: PumpFailureState) -> str:
+    # Append the reported failure message to a log file, then merge the
+    # reported pump statuses into the two-stage status file on disk.
+    # NOTE(review): eval() on lines read back from pump_failure_status.txt
+    # executes arbitrary code if that file can be tampered with — consider
+    # ast.literal_eval or storing JSON instead.
+    item = data.dict()
+
+    with open('./pump_failure_message.txt', 'a', encoding='utf-8-sig') as f1:
+        f1.write('[{}] {}\n'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), item))  # save message
+
+    status_info = item.copy()
+    with open('./pump_failure_status.txt', 'r', encoding='utf-8-sig') as f2:
+        lines = f2.readlines()
+        first_stage_pump_status_dict = json.loads(json.dumps(eval(lines[0])))
+        second_stage_pump_status_dict = json.loads(json.dumps(eval(lines[-1])))  # read local file
+    pump_status_dict = {'first': first_stage_pump_status_dict,  # first-stage pump
+                        'second': second_stage_pump_status_dict}  # second-stage pump
+    for pump_type in status_info['pump_status'].keys():  # 'first' or 'second'
+        if pump_type in pump_status_dict.keys():  # the type of pumps exists
+            if all(pump_id in pump_status_dict[pump_type].keys()
+                   for pump_id in status_info['pump_status'][pump_type].keys()):  # all pump IDs exist
+                for pump_id in status_info['pump_status'][pump_type].keys():
+                    pump_status_dict[pump_type][pump_id] = int(
+                        status_info['pump_status'][pump_type][pump_id])  # modify status dict
+            else:
+                return json.dumps('ERROR: Wrong Pump ID')
+        else:
+            return json.dumps('ERROR: Wrong Pump Type')
+
+    with open('./pump_failure_status.txt', 'w', encoding='utf-8-sig') as f2_:
+        f2_.write('{}\n{}'.format(pump_status_dict['first'], pump_status_dict['second']))  # save local file
+
+    return json.dumps('SUCCESS')
+
+
 # DingZQ, 2024-12-31, run main
 # if __name__ == "__main__":
 #     generate_openapi_json()
-#     uvicorn.run(app, host="127.0.0.1", port=80)
+#     uvicorn.run(app, host="127.0.0.1", port=80)
\ No newline at end of file
diff --git a/online_Analysis.py b/online_Analysis.py
new file mode 100644
index 0000000..bbaf20d
--- /dev/null
+++ b/online_Analysis.py
@@ -0,0 +1,776 @@
+import os
+from tjnetwork import *
+from api.project import CopyProjectEx
+from run_simulation import run_simulation_ex, from_clock_to_seconds_2
+from math import sqrt, pi
+from epanet.epanet import Output
+import json
+from datetime import
datetime
+import time
+import pytz
+import psycopg
+from psycopg import sql
+import pandas as pd
+import csv
+import chardet
+
+
+############################################################
+# burst analysis 01
+############################################################
+
+def burst_analysis(prj_name, date_time, burst_ID: list | str,
+                   burst_size: list | float | int = None, duration=900, pump_control=None, valve_closed=None) -> str:
+    # Simulate pipe burst(s) on a scratch copy of the project: each burst is
+    # modelled as an emitter on a junction endpoint of the burst pipe, then a
+    # pressure-driven (PDA) simulation is run and the copy is deleted.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'burst_Anal_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    ##step 1 set the emitter coefficient of end node of busrt pipe
+
+    # Normalize burst_ID/burst_size so both are parallel lists.
+    if isinstance(burst_ID, list):
+        if (burst_size is not None) and (type(burst_size) is not list):
+            return json.dumps('Type mismatch.')
+    elif isinstance(burst_ID, str):
+        burst_ID = [burst_ID]
+        if burst_size is not None:
+            if isinstance(burst_size, float) or isinstance(burst_size, int):
+                burst_size = [burst_size]
+            else:
+                return json.dumps('Type mismatch.')
+    else:
+        return json.dumps('Type mismatch.')
+
+    # -1 marks "use default size" (1/8 of the pipe cross-section, see below).
+    if burst_size is None:
+        burst_size = [-1] * len(burst_ID)
+    elif len(burst_size) < len(burst_ID):
+        burst_size += [-1] * (len(burst_ID) - len(burst_size))
+    elif len(burst_size) > len(burst_ID):
+        # burst_size = burst_size[:len(burst_ID)]
+        return json.dumps('Length mismatch.')
+
+    for burst_ID_, burst_size_ in zip(burst_ID, burst_size):
+        pipe = get_pipe(new_name, burst_ID_)
+        str_start_node = pipe['node1']
+        str_end_node = pipe['node2']
+        d_pipe = pipe['diameter'] / 1000.0
+        if burst_size_ <= 0:
+            # NOTE(review): literal 3.14 used although pi is imported from math.
+            burst_size_ = 3.14 * d_pipe * d_pipe / 4 / 8
+        else:
+            burst_size_ = burst_size_ / 10000
+
+        emitter_coeff = 0.65 * burst_size_ * sqrt(19.6) * 1000#1/8开口面积作为coeff
+        emitter_node = ''
+        if is_junction(new_name, str_end_node):
+            emitter_node = str_end_node
+        elif is_junction(new_name, str_start_node):
+            emitter_node = str_start_node
+
+        old_emitter = get_emitter(new_name, emitter_node)
+        if(old_emitter != None):
+            old_emitter['coefficient'] = emitter_coeff #爆管的emitter coefficient设置
+        else:
+            old_emitter = {'junction': emitter_node, 'coefficient': emitter_coeff}
+
+        new_emitter = ChangeSet()
+        new_emitter.append(old_emitter)
+        set_emitter(new_name, new_emitter)
+
+    #step 2. run simulation
+
+    # 涉及关阀计算,可能导致关阀后仍有流量,改为压力驱动PDA
+    options = get_option(new_name)
+    options['DEMAND MODEL'] = OPTION_DEMAND_MODEL_PDA
+    options['REQUIRED PRESSURE'] = '20.0000'
+    cs_options = ChangeSet()
+    cs_options.append(options)
+    set_option(new_name, cs_options)
+
+    valve_control = None
+    if valve_closed is not None:
+        valve_control = {}
+        for valve in valve_closed:
+            valve_control[valve] = {'status': 'CLOSED'}
+
+    result = run_simulation_ex(new_name,'realtime', date_time,
+                               end_datetime=date_time,
+                               duration=duration,
+                               pump_control=pump_control,
+                               valve_control=valve_control,
+                               downloading_prohibition=True)
+
+    #step 3. restore the base model status
+    # execute_undo(prj_name) #有疑惑
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+
+    return result
+
+
+############################################################
+# valve closing analysis 02
+############################################################
+def valve_close_analysis(prj_name, date_time, valves, duration=None)->str:
+    # Close the given valves on a scratch copy of the project and run a
+    # pressure-driven (PDA) simulation; the copy is deleted afterwards.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'valve_close_Anal_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    #step 1. change the valves status to 'closed'
+
+    for valve in valves:
+        if not is_valve(new_name,valve):
+            # NOTE(review): message is missing a space after the ID placeholder.
+            result='ID:{}is not a valve type'.format(valve)
+            return result
+        cs=ChangeSet()
+        status=get_status(new_name,valve)
+        status['status']='CLOSED'
+        cs.append(status)
+        set_status(new_name,cs)
+
+    #step 2. run simulation
+
+    # 涉及关阀计算,可能导致关阀后仍有流量,改为压力驱动PDA
+    options = get_option(new_name)
+    options['DEMAND MODEL'] = OPTION_DEMAND_MODEL_PDA
+    options['REQUIRED PRESSURE'] = '20.0000'
+    cs_options = ChangeSet()
+    cs_options.append(options)
+    set_option(new_name, cs_options)
+
+    result = run_simulation_ex(new_name,'realtime', date_time, date_time, duration,
+                               downloading_prohibition=True)
+
+    #step 3.
restore the base model
+    # for valve in valves:
+    #     execute_undo(prj_name)
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+
+    return result
+
+
+############################################################
+# flushing analysis 03
+#Pipe_Flushing_Analysis(prj_name,date_time, Valve_id_list, Drainage_Node_Id, Flushing_flow[opt], Flushing_duration[opt])->out_file:string
+############################################################
+def flushing_analysis(prj_name, date_time, valves, valves_k, drainage_node_ID, flushing_flow=0, duration=None)->str:
+    # Pipe-flushing scenario on a scratch copy: throttle/close the given valves
+    # and open a drainage demand (or emitter) at drainage_node_ID, then simulate.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'flushing_Anal_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    if not is_junction(new_name,drainage_node_ID):
+        return 'Wrong Drainage node type'
+
+    #step 1. change the valves status to 'closed'
+    for valve, valve_k in zip(valves, valves_k):
+        cs=ChangeSet()
+        status=get_status(new_name,valve)
+        # status['status']='CLOSED'
+        if valve_k == 0:
+            status['status'] = 'CLOSED'
+        elif valve_k < 1:
+            status['status'] = 'OPEN'
+            # presumably an empirical valve-resistance curve mapping the relative
+            # opening valve_k to a valve setting — TODO confirm the source.
+            status['setting'] = 0.1036 * pow(valve_k, -3.105)
+        cs.append(status)
+        set_status(new_name,cs)
+
+    #step 2. set the emitter coefficient of drainage node or add flush flow to the drainage node
+    emitter_demand=get_demand(new_name,drainage_node_ID)
+    cs=ChangeSet()
+    if flushing_flow>0:
+        # Explicit flushing flow given in m^3/h; add it to the node demands in L/s.
+        for r in emitter_demand['demands']:
+            r['demand']+=(flushing_flow/3.6)
+        cs.append(emitter_demand)
+        set_demand(new_name,cs)
+    else:
+        pipes=get_node_links(new_name,drainage_node_ID)
+        flush_diameter=50
+        for pipe in pipes:
+            d=get_pipe(new_name,pipe)['diameter']
+            if flush_diameterstr:
+# NOTE(review): the source text appears truncated/garbled above — the tail of
+# flushing_analysis and the signature of contaminant_simulation seem to be
+# missing here; recover the original lines from version control.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'contaminant_Sim_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    dic_time = get_time(new_name)
+    dic_time['QUALITY TIMESTEP'] = '0:05:00'
+    cs = ChangeSet()
+    cs.operations.append(dic_time)
+    set_time(new_name, cs)  # set QUALITY TIMESTEP
+
+    time_option=get_time(new_name)
+    hydraulic_step=time_option['HYDRAULIC TIMESTEP']
+    secs=from_clock_to_seconds_2(hydraulic_step)
+    operation_step=0
+    #step 1. set duration
+    if duration==None:
+        duration=secs
+    #step 2. set pattern
+    if pattern!=None:
+        pt=get_pattern(new_name,pattern)
+        if pt==None:
+            str_response=str('cant find pattern')
+            return str_response
+    else:
+        # No pattern given: build a constant (all 1.0) injection pattern
+        # spanning the whole duration.
+        cs_pattern=ChangeSet()
+        pt={}
+        factors=[]
+        tmp_duration=duration
+        while tmp_duration>0:
+            factors.append(1.0)
+            tmp_duration=tmp_duration-secs
+        pt['id']='contam_pt'
+        pt['factors']=factors
+        cs_pattern.append(pt)
+        add_pattern(new_name,cs_pattern)
+        operation_step+=1
+
+    #step 3. set source/initial quality
+    # source quality
+    cs_source=ChangeSet()
+    source_schema={'node':source,'s_type':SOURCE_TYPE_CONCEN,'strength':concentration,'pattern':pt['id']}
+    cs_source.append(source_schema)
+    source_node=get_source(new_name,source)
+    if len(source_node)==0:
+        add_source(new_name,cs_source)
+    else:
+        set_source(new_name,cs_source)
+
+    dict_demand = get_demand(new_name, source)
+    for demands in dict_demand['demands']:
+        dict_demand['demands'][dict_demand['demands'].index(demands)]['demand'] = -1
+        dict_demand['demands'][dict_demand['demands'].index(demands)]['pattern'] = None
+    cs = ChangeSet()
+    cs.append(dict_demand)
+    set_demand(new_name, cs)  # set inflow node
+
+    # # initial quality
+    # dict_quality = get_quality(new_name, source)
+    # dict_quality['quality'] = concentration
+    # cs = ChangeSet()
+    # cs.append(dict_quality)
+    # set_quality(new_name, cs)
+
+    operation_step+=1
+
+    #step 4 set option of quality to chemical
+    opt=get_option(new_name)
+    opt['QUALITY']=OPTION_QUALITY_CHEMICAL
+    cs_option=ChangeSet()
+    cs_option.append(opt)
+    set_option(new_name,cs_option)
+    operation_step+=1
+
+    #step 5. run simulation
+    result = run_simulation_ex(new_name,'realtime', date_time, date_time, duration,
+                               downloading_prohibition=True)
+
+    # for i in range(1,operation_step):
+    #     execute_undo(prj_name)
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+    return result
+
+############################################################
+# age analysis 05
+############################################################
+
+
+def age_analysis(prj_name, start_time, end_time, duration) -> str:
+    # Water-age analysis on a scratch copy of the project; returns a JSON dict
+    # with the last-timestep 'quality' value per node and per link.
+    # NOTE(review): assumes the project's quality option is configured for AGE —
+    # this function does not set it; confirm upstream configuration.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'age_Anal_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    # step 1. run simulation
+
+    result = run_simulation_ex(new_name, 'realtime', start_time, end_time, duration,
+                               downloading_prohibition=True)
+
+    # step 2.
restore the base model status
+    # execute_undo(prj_name) #有疑惑
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+
+    output = Output("./temp/{}.db.out".format(new_name))
+
+    # element_name = output.element_name()
+    # node_name = element_name['nodes']
+    # link_name = element_name['links']
+
+    # Collect the final-timestep quality value for every node and link.
+    nodes_age = []
+    node_result = output.node_results()
+    for node in node_result:
+        nodes_age.append(node['result'][-1]['quality'])
+
+    links_age = []
+    link_result = output.link_results()
+    for link in link_result:
+        links_age.append(link['result'][-1]['quality'])
+
+    age_result = {'nodes': nodes_age, 'links': links_age}
+    # age_result = {'nodes': nodes_age, 'links': links_age, 'nodeIDs': node_name, 'linkIDs': link_name}
+
+    return json.dumps(age_result)
+
+
+############################################################
+# pressure regulation 06
+############################################################
+
+
+def pressure_regulation(prj_name, start_datetime, pump_control, tank_initial_level_control=None) -> str:
+    # 15-minute pressure-regulation simulation on a scratch copy, with the given
+    # pump statuses and optional tank initial levels; runs in PDA mode.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'pressure_regulation_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    # 全部关泵后,压力计算不合理,改为压力驱动PDA
+    options = get_option(new_name)
+    options['DEMAND MODEL'] = OPTION_DEMAND_MODEL_PDA
+    options['REQUIRED PRESSURE'] = '15.0000'
+    cs_options = ChangeSet()
+    cs_options.append(options)
+    set_option(new_name, cs_options)
+
+    result = run_simulation_ex(name=new_name,
+                               simulation_type='realtime',
+                               start_datetime=start_datetime,
+                               duration=900,
+                               pump_control=pump_control,
+                               tank_initial_level_control=tank_initial_level_control,
+                               downloading_prohibition=True)
+
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+
+    return result
+
+
+############################################################
+# project management 07
+############################################################
+
+
+def project_management(prj_name, start_datetime, pump_control,
+                       tank_initial_level_control=None, region_demand_control=None) -> str:
+    # 24 h (86400 s) simulation on a scratch copy with pump, tank-level and
+    # per-region demand controls applied.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'project_management_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    result = run_simulation_ex(name=new_name,
+                               simulation_type='realtime',
+                               start_datetime=start_datetime,
+                               duration=86400,
+                               pump_control=pump_control,
+                               tank_initial_level_control=tank_initial_level_control,
+                               region_demand_control=region_demand_control,
+                               downloading_prohibition=True)
+
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+
+    return result
+
+
+############################################################
+# scheduling analysis 08
+############################################################
+
+
+def scheduling_simulation(prj_name, start_time, pump_control, tank_id, water_plant_output_id, time_delta=300) -> str:
+    # Single-step scheduling check: run a zero-duration simulation with the
+    # given pump statuses, then estimate plant outlet pressure and the tank
+    # level after time_delta seconds from the tank inflow balance.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'scheduling_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    run_simulation_ex(new_name, 'realtime', start_time, duration=0, pump_control=pump_control)
+
+    if not is_project_open(new_name):
+        open_project(new_name)
+
+    tank = get_tank(new_name, tank_id)  # 水塔信息
+    tank_floor_space = pi * pow(tank['diameter'] / 2, 2)  # 水塔底面积(m^2)
+    tank_init_level = tank['init_level']  # 水塔初始水位(m)
+    tank_pipes_id = tank['links']  # pipes list
+
+    tank_pipe_flow_direction = {}  # 管道流向修正系数, 水塔为下游节点时为1, 水塔为上游节点时为-1
+    for pipe_id in tank_pipes_id:
+        if get_pipe(new_name, pipe_id)['node2'] == tank_id:  # 水塔为下游节点
+            tank_pipe_flow_direction[pipe_id] = 1
+        else:
+            tank_pipe_flow_direction[pipe_id] = -1
+
+    output = Output("./temp/{}.db.out".format(new_name))
+
+    node_results = output.node_results()  # [{'node': str, 'result': [{'pressure': float}]}]
+    water_plant_output_pressure = 0
+    for node_result in node_results:
+        if node_result['node'] == water_plant_output_id:  # 水厂出水压力(m)
+            water_plant_output_pressure = node_result['result'][-1]['pressure']
+    water_plant_output_pressure /= 100  # 预计水厂出水压力(Mpa)
+
+    pipe_results = output.link_results()  # [{'link': str, 'result': [{'flow': float}]}]
+    tank_inflow = 0
+    for pipe_result in pipe_results:
+        for pipe_id in tank_pipes_id:  # 遍历与水塔相连的管道
+            if pipe_result['link'] == pipe_id:  # 水塔入流流量(L/s)
+                tank_inflow += pipe_result['result'][-1]['flow'] * tank_pipe_flow_direction[pipe_id]
+    tank_inflow /= 1000  # 水塔入流流量(m^3/s)
+    tank_level_delta = tank_inflow * time_delta / tank_floor_space  # 水塔水位改变值(m)
+    tank_level = tank_init_level + tank_level_delta  # 预计水塔水位(m)
+
+    simulation_results = {'water_plant_output_pressure': water_plant_output_pressure,
+                          'tank_init_level': tank_init_level,
+                          'tank_level': tank_level}
+
+    if is_project_open(new_name):
+        close_project(new_name)
+        delete_project(new_name)
+
+    return json.dumps(simulation_results)
+
+
+def daily_scheduling_simulation(prj_name, start_time, pump_control,
+                                reservoir_id, tank_id, water_plant_output_id) -> str:
+    # Daily (24 h) scheduling check: run the simulation with the given pump
+    # schedule and report per-timestep plant pressure, reservoir level and
+    # tank level series.
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Analysis.")
+    new_name = f'daily_scheduling_{prj_name}'
+
+    if have_project(new_name):
+        if is_project_open(new_name):
+            close_project(new_name)
+        delete_project(new_name)
+    if is_project_open(prj_name):
+        close_project(prj_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Copying Database.")
+    CopyProjectEx()(prj_name, new_name,
+                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Start Opening Database.")
+    open_project(new_name)
+    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Database Loading OK.")
+
+    run_simulation_ex(new_name, 'realtime', start_time, duration=86400, pump_control=pump_control)
+ + if not is_project_open(new_name): + open_project(new_name) + + output = Output("./temp/{}.db.out".format(new_name)) + + node_results = output.node_results() # [{'node': str, 'result': [{'pressure': float, 'head': float}]}] + water_plant_output_pressure = [] + reservoir_level = [] + tank_level = [] + for node_result in node_results: + if node_result['node'] == water_plant_output_id: + for result in node_result['result']: + water_plant_output_pressure.append(result['pressure'] / 100) # 水厂出水压力(Mpa) + elif node_result['node'] == reservoir_id: + for result in node_result['result']: + reservoir_level.append(result['head'] - 250.35) # 清水池液位(m) + elif node_result['node'] == tank_id: + for result in node_result['result']: + tank_level.append(result['pressure']) # 调节池液位(m) + + simulation_results = {'water_plant_output_pressure': water_plant_output_pressure, + 'reservoir_level': reservoir_level, + 'tank_level': tank_level} + + if is_project_open(new_name): + close_project(new_name) + delete_project(new_name) + + return json.dumps(simulation_results) + +############################################################ +# network_update 10 +############################################################ + +def network_update(file_path: str) -> None: + read_inp('bb', file_path) + + csv_path = './history_pattern_flow.csv' + + # # 检查文件是否存在 + # if os.path.exists(csv_path): + # print(f"history_patterns_flows文件存在,开始处理...") + # + # # 读取 CSV 文件 + # df = pd.read_csv(csv_path) + # + # # 连接到 PostgreSQL 数据库(这里是数据库 "bb") + # with psycopg.connect("dbname=bb host=127.0.0.1") as conn: + # with conn.cursor() as cur: + # for index, row in df.iterrows(): + # # 直接将数据插入,不进行唯一性检查 + # insert_sql = sql.SQL(""" + # INSERT INTO history_patterns_flows (id, factor, flow) + # VALUES (%s, %s, %s); + # """) + # # 将数据插入数据库 + # cur.execute(insert_sql, (row['id'], row['factor'], row['flow'])) + # conn.commit() + # print("数据成功导入到 'history_patterns_flows' 表格。") + # else: + # print(f"history_patterns_flows文件不存在。") + # 
def submit_scada_info(name: str, coord_id: str) -> None:
    """
    Import the SCADA info table (./scada_info.csv) into the PostgreSQL database.

    Any rows already present in the ``scada_info`` table are deleted first, so
    the table always mirrors the CSV file after a successful run.

    :param name: project name (PostgreSQL database name)
    :param coord_id: SRID of the coordinate system (e.g. '4326'), chosen
                     according to the source coordinate data
    :return: None
    """
    scada_info_path = './scada_info.csv'
    # Guard clause: nothing to do when the CSV file is missing.
    if not os.path.exists(scada_info_path):
        print(f"scada_info文件不存在。")
        return

    print(f"scada_info文件存在,开始处理...")

    # Auto-detect the file encoding (the CSV may be GBK, UTF-8, ...).
    with open(scada_info_path, 'rb') as file:
        detected = chardet.detect(file.read())
    file_encoding = detected['encoding']
    print(f"检测到的文件编码:{file_encoding}")

    # The associated_source_outflow_idN column list and the INSERT statement
    # are loop invariants — build them once instead of once per CSV row.
    associated_columns = [f"associated_source_outflow_id{i}" for i in range(1, 21)]
    insert_sql = sql.SQL("""
        INSERT INTO scada_info (
            id, type, associated_element_id, associated_pattern,
            associated_pipe_flow_id, {associated_columns},
            API_query_id, transmission_mode, transmission_frequency,
            X_coor, Y_coor, coord
        )
        VALUES (
            %s, %s, %s, %s, %s, {associated_placeholders},
            %s, %s, %s, %s, %s, %s
        );
    """).format(
        associated_columns=sql.SQL(", ").join(sql.Identifier(col) for col in associated_columns),
        associated_placeholders=sql.SQL(", ").join(sql.Placeholder() for _ in associated_columns)
    )

    try:
        # Target database is selected dynamically from *name*.
        conn_string = f"dbname={name} host=127.0.0.1"
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # Clear any previously imported records so the import is a
                # full replacement, not an append.
                cur.execute("SELECT COUNT(*) FROM scada_info;")
                count = cur.fetchone()[0]
                if count > 0:
                    print("scada_info表中已有数据,正在清空记录...")
                    cur.execute("DELETE FROM scada_info;")
                    print("表记录已清空。")

                with open(scada_info_path, newline='', encoding=file_encoding) as csvfile:
                    for row in csv.DictReader(csvfile):
                        # Normalize blank CSV cells to None (-> SQL NULL).
                        cleaned_row = {key: (value if value.strip() else None) for key, value in row.items()}

                        # Collect the (up to 20) associated source-outflow ids,
                        # stripped, with empty cells mapped to None.
                        associated_values = [
                            (cleaned_row.get(col).strip()
                             if cleaned_row.get(col) and cleaned_row.get(col).strip() else None)
                            for col in associated_columns
                        ]

                        # Convert X/Y to floats and build the PostGIS point.
                        # Compare against None (not truthiness) so a legitimate
                        # 0.0 coordinate is not silently dropped.
                        x_coor = float(cleaned_row['X_coor']) if cleaned_row['X_coor'] else None
                        y_coor = float(cleaned_row['Y_coor']) if cleaned_row['Y_coor'] else None
                        coord = (f"SRID={coord_id};POINT({x_coor} {y_coor})"
                                 if x_coor is not None and y_coor is not None else None)

                        cur.execute(insert_sql, (
                            cleaned_row['id'], cleaned_row['type'], cleaned_row['associated_element_id'],
                            cleaned_row.get('associated_pattern'), cleaned_row.get('associated_pipe_flow_id'),
                            *associated_values, cleaned_row.get('API_query_id'),
                            cleaned_row['transmission_mode'], cleaned_row['transmission_frequency'],
                            x_coor, y_coor, coord
                        ))
                conn.commit()
                print("数据成功导入到 'scada_info' 表格。")
    except Exception as e:
        # Best-effort import: report and return rather than crash the caller.
        print(f"导入时出错:{e}")
burst_analysis('bb','2024-04-01T08:00:00Z','ZBBGXSZW000001',burst_size=200,duration=1800) + #run_simulation('beibeizone','2024-04-01T08:00:00Z') +# str_dump=dump_output('h:\\OneDrive\\tjwaterserver\\temp\\beibeizone.db_no_burst.out') +# with open("out_dump.txt", "w") as f: +# f.write(str_dump) +# str_dump=dump_output('h:\\OneDrive\\tjwaterserver\\temp\\beibeizone.db_busrtID(ZBBGXSZW000001).out') +# with open("burst_out_dump.txt", "w") as f: +# f.write(str_dump) +# network_update('model22_1223.inp') + submit_scada_info('bb', '4490') diff --git a/simulation.py b/simulation.py new file mode 100644 index 0000000..db5e935 --- /dev/null +++ b/simulation.py @@ -0,0 +1,1063 @@ +import numpy as np +from tjnetwork import * +from api.s36_wda_cal import * +from get_real_status import * +from datetime import datetime,timedelta +from math import modf +import json +import pytz +import requests +import time +from epanet.epanet import Output +from typing import Optional, Tuple +import influxdb_api +import typing +import psycopg +import logging +import globals + +# 数据接口 +# url_path = 'http://10.101.15.16:9000/loong' # 内网 +# url_path = 'http://183.64.62.100:9057/loong' # 外网 +# url_real = url_path + '/api/mpoints/realValue' +# url_hist = url_path + '/api/curves/data' + +# 实时数据的设备编号 +DN_900_ID='2498' +DN_500_ID='3854' +DN_1000_ID='3853' +H_RESSURE='2510' +L_PRESURE='2514' +H_TANK='4780' +L_TANK='4854' + +# inp文件数据信息 +PATTERN_TIME_STEP = 15.0 + +# regions +# regions = ['hp', 'lp'] +# regions_demand_patterns = {'hp': ['DN900', 'DN500'], 'lp': ['DN1000']} # 出厂水量近似表示用水量 +# # regions_patterns = {'hp': ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan', +# # '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu', +# # 'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang', +# # 'YongJinFu', 'BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu', +# # 'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng', 'CaoJiaBa', 
+# # 'PuLingChang', 'QiLongXiaoQu', 'TuanXiao', +# # 'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi', +# # 'DN500', 'DN900'], +# # 'lp': ['PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao', +# # 'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha', +# # 'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan', +# # 'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong', +# # 'WanKeJinYuHuaFuYangFang', 'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ', +# # 'YueLiangTian', 'YueLiangTian200', 'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu', +# # 'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)', +# # 'ZhangDouHua', 'JinYunXiaoQuDN400', +# # 'DN1000']} +# +# # nodes +# monitor_single_patterns = ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan', +# '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu', +# 'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang', +# 'YongJinFu', 'PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao', +# 'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha', +# 'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan', +# 'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong', +# 'WanKeJinYuHuaFuYangFang'] +# monitor_single_patterns_id = {'ChuanYiJiXiao': '7338', 'BeiQuanHuaYuan': '7315', 'ZhuangYuanFuDi': '7316', +# 'JingNingJiaYuan': '7528', '308': '8272', 'JiaYinYuan': '7304', +# 'XinChengGuoJi': '7325', 'YiJingBeiChen': '7328', 'ZhongYangXinDu': '7329', +# 'XinHaiJiaYuan': '9138', 'DongFengJie': '7302', 'DingYaXinYu': '7331', +# 'ZiYunTai': '7420,9059', 'XieMaGuangChang': '7326', 'YongJinFu': '9059', +# 'PanXiMingDu': '7320', 'WanKeJinYuHuaFuGaoCeng': '7419', +# 'KeJiXiao': '7305', 'LuGouQiao': '7306', 'LongJiangHuaYuan': '7318', +# 'LaoQiZhongDui': '9075', 'ShiYanCun': '7309', 'TianQiDaSha': '7323', +# 
'TianShengPaiChuSuo': '7335', 'TianShengShangPin': '7324', 'JiaoTang': '7332', +# 'RenMinHuaYuan': '7322', 'TaiJiBinJiangYiQi': '7333', 'TianQiHuaYuan': '8235', +# 'TaiJiBinJiangErQi': '7334', '122Zhong': '7314', 'WanKeJinYuHuaFuYangFang': '7418'} +# +# monitor_unity_patterns = ['BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu', +# 'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng', +# 'CaoJiaBa', 'PuLingChang', 'QiLongXiaoQu', 'TuanXiao', +# 'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ', +# 'YueLiangTian', 'YueLiangTian200', +# 'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu', +# 'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi', +# 'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)', +# 'ZhangDouHua', '', +# 'DN500', 'DN900', 'DN1000'] +# monitor_unity_patterns_id = {'BianDianZhan': '7339', 'BeiNanDaDao': '7319', 'TianShengLiJie': '8242', +# 'XueYuanXiaoQu': '7327', 'YunHuaLu': '7312', 'GaoJiaQiao': '7340', +# 'LuZuoFuLuXiaDuan': '7343', 'TianRunCheng': '7310', 'CaoJiaBa': '7300', +# 'PuLingChang': '7307', 'QiLongXiaoQu': '7321', 'TuanXiao': '8963', +# 'ChengBeiCaiShiKou': '7330', 'WenXingShe': '7311', +# 'YueLiangTianBBGJCZ': '7313', 'YueLiangTian': '7313', 'YueLiangTian200': '7313', +# 'ChengTaoChang': '7301', 'HuoCheZhan': '7303', +# 'LiangKu': '7296', 'QunXingLu': '7308', +# 'DN500': '3854', 'DN900': '2498', 'DN1000': '3853'} +# monitor_patterns = monitor_single_patterns + monitor_unity_patterns +# monitor_patterns_id = {**monitor_single_patterns_id, **monitor_unity_patterns_id} + +# flow +# hp_flow_pattern_id = {'DN900': '2498', 'DN500': '3854'} +# lp_flow_pattern_id = {'DN1000': '3853'} +# +# # pumps +# pump_pattern_ids = ['1#', '2#', '3#', '4#', '5#', '6#', '7#'] +# pumps = ['PU00000', 'PU00001', 'PU00002', 'PU00003', 'PU00004', 'PU00005', 'PU00006'] +# variable_frequency_pumps = ['PU00004', 'PU00005', 'PU00006'] +# fixed_pumps_id = {'PU00000': '2747', 'PU00001': '2776', 'PU00002': 
def query_corresponding_element_id_and_query_id(name: str) -> None:
    """
    Query the 'realtime' records of scada_info and cache the mapping from
    associated_element_id to api_query_id into the per-type globals dicts
    (reservoirs, tanks, pumps, pressure, demand, quality).

    :param name: database name
    :return: None
    """
    # Map each record type to the globals dict that caches its
    # element-id -> api_query_id relation (replaces a long if/elif chain).
    type_to_mapping = {
        'reservoir_liquid_level': globals.reservoirs_id,
        'tank_liquid_level': globals.tanks_id,
        'fixed_pump': globals.fixed_pumps_id,
        'variable_pump': globals.variable_pumps_id,
        'pressure': globals.pressure_id,
        'demand': globals.demand_id,
        'quality': globals.quality_id,
    }

    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # Only realtime transmissions carry a live api_query_id.
                cur.execute("""
                    SELECT type, associated_element_id, api_query_id
                    FROM scada_info
                    WHERE transmission_mode = 'realtime';
                """)
                for type_, associated_element_id, api_query_id in cur.fetchall():
                    mapping = type_to_mapping.get(type_)
                    if mapping is None:
                        # Unknown type: keep the original best-effort behavior
                        # of reporting and continuing.
                        print(f"未处理的类型: {type_}")
                    else:
                        mapping[associated_element_id] = api_query_id
    except psycopg.Error as e:
        print(f"数据库连接或查询出错: {e}")
# 2025/01/11
def query_non_realtime_region(name: str) -> dict:
    """
    Query scada_info records with transmission_mode 'non_realtime' and type
    'pipe_flow', collect the values of every column whose name starts with
    'associated_source_outflow_id', treat each record's values as one region,
    drop duplicate regions, and store the result into
    globals.source_outflow_region as {'region1': [...], 'region2': [...]}.

    :param name: database name
    :return: dict mapping region keys to their associated_source_outflow_id lists
    """
    source_outflow_regions = []  # all regions in record order (may contain duplicates)
    conn_string = f"dbname={name} host=127.0.0.1"

    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                cur.execute("""
                    SELECT *
                    FROM scada_info
                    WHERE transmission_mode = 'non_realtime'
                    AND type = 'pipe_flow';
                """)

                records = cur.fetchall()
                col_names = [desc.name for desc in cur.description]

                # Columns named associated_source_outflow_id*; resolve their
                # positions once — calling col_names.index() per record per
                # column was accidentally quadratic.
                source_outflow_cols = [col for col in col_names if col.startswith('associated_source_outflow_id')]
                logging.info(f"Identified source_outflow columns: {source_outflow_cols}")
                col_positions = [col_names.index(col) for col in source_outflow_cols]

                for record in records:
                    # Non-NULL source outflow ids of this record form a region.
                    values = [record[pos] for pos in col_positions if record[pos] is not None]
                    if values:
                        # Sort so that identical id sets compare equal
                        # regardless of the column order they came from.
                        source_outflow_regions.append(tuple(sorted(values)))

        # De-duplicate while preserving first-seen order.
        unique_regions = list(dict.fromkeys(source_outflow_regions))

        # Assign region1, region2, ... keys to the unique regions.
        for idx, region in enumerate(unique_regions, 1):
            globals.source_outflow_region[f"region{idx}"] = list(region)

        logging.info("查询并处理数据成功。")
    except psycopg.Error as e:
        logging.error(f"数据库连接或查询出错: {e}")
    except Exception as ex:
        logging.error(f"处理数据时出错: {ex}")

    return globals.source_outflow_region
# 2025/01/18
def query_realtime_region_pipe_flow_and_demand_id(name: str, source_outflow_region: dict, column_prefix: str = 'associated_source_outflow_id') -> dict:
    """
    Group scada_info records whose transmission_mode is 'realtime' and whose
    type is 'pipe_flow' or 'demand' by source_outflow_region, collecting each
    matching record's api_query_id into
    globals.realtime_region_pipe_flow_and_demand_id.

    NOTE(review): this reads globals.source_outflow_region rather than the
    source_outflow_region argument — confirm that is intentional.

    :param name: database name
    :param source_outflow_region: dict of region key -> associated_source_outflow_id list
    :param column_prefix: prefix of the columns to extract
    :return: dict of region key -> list of api_query_id
    """
    globals.realtime_region_pipe_flow_and_demand_id = {
        region: [] for region in globals.source_outflow_region
    }
    # frozenset(ids) -> region key, so an unordered id set identifies a region.
    region_lookup = {frozenset(ids): region for region, ids in globals.source_outflow_region.items()}

    try:
        with psycopg.connect(f"dbname={name} host=127.0.0.1") as conn:
            with conn.cursor() as cur:
                # Realtime pipe_flow / demand records only.
                cur.execute("""
                    SELECT *
                    FROM scada_info
                    WHERE transmission_mode = 'realtime'
                    AND type IN ('pipe_flow', 'demand');
                """)

                rows = cur.fetchall()
                columns = [desc.name for desc in cur.description]

                prefixed_cols = [col for col in columns if col.startswith(column_prefix)]
                logging.info(f"Identified source_outflow columns: {prefixed_cols}")

                if 'api_query_id' not in columns:
                    logging.error("'api_query_id' column not found in scada_info table.")
                    return globals.realtime_region_pipe_flow_and_demand_id
                api_query_id_pos = columns.index('api_query_id')

                for row in rows:
                    # Non-NULL source outflow ids of this record.
                    ids = [row[columns.index(col)] for col in prefixed_cols
                           if row[columns.index(col)] is not None]
                    if not ids:
                        continue
                    # Match the id set against a known region.
                    region_key = region_lookup.get(frozenset(ids))
                    if region_key:
                        api_query_id = row[api_query_id_pos]
                        if api_query_id is not None:
                            globals.realtime_region_pipe_flow_and_demand_id[region_key].append(api_query_id)

                logging.info("生成 realtime_region_pipe_flow_and_demand_id 成功。")
    except psycopg.Error as e:
        logging.error(f"数据库连接或查询出错: {e}")
    except Exception as ex:
        logging.error(f"处理数据时出错: {ex}")

    return globals.realtime_region_pipe_flow_and_demand_id
# 2025/01/11
def get_source_outflow_region_id(name: str, source_outflow_region: dict,
                                 column_prefix: str = 'associated_source_outflow_id') -> dict:
    """
    Build source_outflow_region_id from source_outflow_region by replacing
    each associated_source_outflow_id with the api_query_id recorded for it
    in scada_info.

    :param name: database name
    :param source_outflow_region: dict of region key -> associated_source_outflow_id list
    :param column_prefix: prefix of the columns to extract
    :return: dict of region key -> list of api_query_id
    """
    globals.source_outflow_region_id = {region: [] for region in globals.source_outflow_region}

    # Flatten every id referenced by any region into one set.
    all_ids = {element_id for ids in globals.source_outflow_region.values() for element_id in ids}
    if not all_ids:
        logging.warning("No associated_source_outflow_id found in source_outflow_region.")
        return globals.source_outflow_region_id

    try:
        with psycopg.connect(f"dbname={name} host=127.0.0.1") as conn:
            with conn.cursor() as cur:
                # Resolve all element ids to api_query_ids in a single query.
                cur.execute(
                    """
                    SELECT associated_element_id, api_query_id
                    FROM scada_info
                    WHERE associated_element_id = ANY(%s)
                    """,
                    (list(all_ids),),
                )
                # element id -> api_query_id (as str), skipping NULL ids.
                id_to_api_query_id = {
                    element_id: str(api_id)
                    for element_id, api_id in cur.fetchall()
                    if element_id in all_ids and api_id is not None
                }

                # Substitute each region's element ids with api_query_ids.
                for region, ids in globals.source_outflow_region.items():
                    for element_id in ids:
                        api_id = id_to_api_query_id.get(element_id)
                        if api_id:
                            globals.source_outflow_region_id[region].append(api_id)
                        else:
                            logging.warning(f"No api_query_id found for associated_source_outflow_id: {element_id}")

    except psycopg.Error as e:
        logging.error(f"数据库连接或查询出错: {e}")
    except Exception as ex:
        logging.error(f"处理数据时出错: {ex}")

    return globals.source_outflow_region_id
# 2025/01/18
def get_realtime_region_patterns(name: str, source_outflow_region_id: dict, realtime_region_pipe_flow_and_demand_id: dict) -> (dict, dict):
    """
    For every region, look up in scada_info the associated_pattern of each
    api_query_id, filling globals.source_outflow_region_patterns and
    globals.realtime_region_pipe_flow_and_demand_patterns.

    :param name: database name
    :param source_outflow_region_id: dict of region key -> api_query_id list
    :param realtime_region_pipe_flow_and_demand_id: dict of region key -> api_query_id list
    :return: (source_outflow_region_patterns, realtime_region_pipe_flow_and_demand_patterns)
    """
    globals.source_outflow_region_patterns = {region: [] for region in globals.source_outflow_region_id.keys()}
    globals.realtime_region_pipe_flow_and_demand_patterns = {region: [] for region in
                                                            globals.realtime_region_pipe_flow_and_demand_id.keys()}

    def _patterns_for(cur, api_ids) -> list:
        """Fetch the non-empty associated_pattern of each api_query_id.

        Uses a parameterized ``= ANY(%s)`` instead of interpolating the ids
        into an ``IN (...)`` literal, which was SQL-injection-prone.
        """
        cur.execute("""
            SELECT api_query_id, associated_pattern
            FROM scada_info
            WHERE api_query_id = ANY(%s);
        """, (list(api_ids),))
        return [associated_pattern for _, associated_pattern in cur.fetchall() if associated_pattern]

    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                for region in globals.source_outflow_region_id.keys():
                    # Patterns of the region's source-outflow meters.
                    source_outflow_api_ids = globals.source_outflow_region_id[region]
                    if source_outflow_api_ids:
                        globals.source_outflow_region_patterns[region] = _patterns_for(cur, source_outflow_api_ids)

                    # Patterns of the region's realtime pipe_flow/demand meters.
                    realtime_api_ids = globals.realtime_region_pipe_flow_and_demand_id[region]
                    if realtime_api_ids:
                        globals.realtime_region_pipe_flow_and_demand_patterns[region] = _patterns_for(cur, realtime_api_ids)

                logging.info("生成 source_outflow_region_patterns 和 realtime_region_pipe_flow_and_demand_patterns 成功。")
    except psycopg.Error as e:
        logging.error(f"数据库连接或查询出错: {e}")
    except Exception as ex:
        logging.error(f"处理数据时出错: {ex}")

    return globals.source_outflow_region_patterns, globals.realtime_region_pipe_flow_and_demand_patterns
def get_pattern_index(cur_datetime: str) -> int:
    """
    Map a datetime string to its pattern slot index.

    :param cur_datetime: str, datetime formatted as "YYYY-MM-DD HH:MM:SS".
    :return: int, slot index based on the predefined PATTERN_TIME_STEP.
    """
    parsed = datetime.strptime(cur_datetime, "%Y-%m-%d %H:%M:%S")
    minutes_into_day = parsed.hour * 60 + parsed.minute
    return int(minutes_into_day / PATTERN_TIME_STEP)


def get_pattern_index_str(current_time: str) -> str:
    """
    Resolve the pattern slot of *current_time* and render it as "HH:MM:00".

    :param current_time: str, time formatted as "YYYY-MM-DD HH:MM:SS"
    :return: str, clock string "HH:MM:00"
    """
    slot = get_pattern_index(current_time)
    # Split slot * step (in hours) into fractional and whole parts.
    fractional_hours, whole_hours = modf(slot * PATTERN_TIME_STEP / 60)
    hour_str = str(int(whole_hours)).zfill(2)
    minute_str = str(int(fractional_hours * 60)).zfill(2)
    return '{}:{}:00'.format(hour_str, minute_str)


def from_seconds_to_clock(secs: int) -> str:
    """
    Render a duration given in seconds as an "HH:MM:SS" clock string.

    :param secs: int, duration in seconds
    :return: str, clock string "HH:MM:SS"
    """
    hrs = int(secs / 3600)
    minutes = int((secs - hrs * 3600) / 60)
    seconds = secs - hrs * 3600 - minutes * 60
    return ':'.join(str(part).zfill(2) for part in (hrs, minutes, seconds))


def convert_time_format(original_time: str) -> str:
    """
    Convert a "2024-04-13T08:00:00+08:00" style timestamp to
    "2024-04-13 08:00:00".

    :param original_time: str, time like "2024-04-13T08:00:00+08:00"
    :return: str, time like "2024-04-13 08:00:00"
    """
    return original_time.replace('T', ' ').replace('+08:00', '')


def get_history_pattern_info(project_name, pattern_name):
    """
    Read the stored historical flow values of the selected pattern.

    NOTE(review): pattern_name is interpolated straight into the SQL text;
    this is only safe while pattern ids come from trusted model data.
    """
    patterns_info = read_all(project_name,
                             f"select * from history_patterns_flows where id = '{pattern_name}' order by _order")
    return [float(item['flow']) for item in patterns_info]
获取模拟开始时间是对应pattern的第几个数 + modify_index = get_pattern_index(pattern_start_time) + + # 遍历水泵的pattern_id,并根据输入的pump_pattern修改pattern的值 + # for pump_pattern_id in pump_pattern_ids: + # # 检查pump_pattern中pump_pattern_id对应的第一个频率值是否为有效数字(非空、非NaN)。如果该值有效,则继续执行代码块。 + # if not np.isnan(modify_pump_pattern[pump_pattern_id][0]): + # # 取出数据库中的pattern + # pump_pattern = get_pattern(name_c, get_pump(name_c, pump_pattern_id)['pattern']) + # # 替换数据库中的pattern为modify_pump_pattern + # pump_pattern['factors'][modify_index: modify_index + len(modify_pump_pattern[pump_pattern_id])] \ + # = modify_pump_pattern[pump_pattern_id] + # cs = ChangeSet() + # cs.append(pump_pattern) + # set_pattern(name_c, cs) + + # 修改模拟开始的时间 + str_pattern_start = get_pattern_index_str(convert_time_format(modify_pattern_start_time)) + dic_time = get_time(name_c) + dic_time['PATTERN START'] = str_pattern_start + dic_time['DURATION'] = from_seconds_to_clock(modify_total_duration) + cs = ChangeSet() + cs.operations.append(dic_time) + set_time(name_c, cs) + + if globals.reservoirs_id: + # reservoirs_id = {'ZBBDJSCP000002': '2497', 'R00003': '2571'} + # 1.获取reservoir的SCADA数据,形式如{'2497': '3.1231', '2571': '2.7387'} + reservoir_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.reservoirs_id.values()), query_time=modify_pattern_start_time) + + # 2.构建出新字典,形式如{'ZBBDJSCP000002': '3.1231', 'R00003': '2.7387'} + reservoir_dict = {key: reservoir_SCADA_data_dict[value] for key, value in globals.reservoirs_id.items()} + + # 3.修改reservoir液位模式 + for reservoir_name, value in reservoir_dict.items(): + if value and float(value) != 0: + # 先根据reservoir获取对应的pattern,再对pattern进行修改 + reservoir_pattern = get_pattern(name_c, get_reservoir(name_c, reservoir_name)['pattern']) + reservoir_pattern['factors'][modify_index] = float(value) + globals.RESERVOIR_BASIC_HEIGHT + cs = ChangeSet() + cs.append(reservoir_pattern) + set_pattern(name_c, cs) + + if globals.tanks_id: + # 修改tank初始液位 + 
tank_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.tanks_id.values()), query_time=modify_pattern_start_time) + + tank_dict = {key: tank_SCADA_data_dict[value] for key, value in globals.tanks_id.items()} + + for tank_name, value in tank_dict.items(): + if value and float(value) != 0: + tank = get_tank(name_c, tank_name) + tank['init_level'] = float(value) + cs = ChangeSet() + cs.append(tank) + set_tank(name_c, cs) + + if globals.fixed_pumps_id: + # 修改工频泵的pattern + fixed_pump_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.fixed_pumps_id.values()), query_time=modify_pattern_start_time) + + fixed_pump_dict = {key: fixed_pump_SCADA_data_dict[value] for key, value in globals.fixed_pumps_id.items()} + + for fixed_pump_name, value in fixed_pump_dict.items(): + if value and float(value) != 0: + pump_pattern = get_pattern(name_c, get_pump(name_c, fixed_pump_name)['pattern']) + pump_pattern['factors'][modify_index] = float(value) + cs = ChangeSet() + cs.append(pump_pattern) + set_pattern(name_c, cs) + + if globals.variable_pumps_id: + # 修改变频泵的pattern + variable_pump_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.variable_pumps_id.values()), query_time=modify_pattern_start_time) + + variable_pump_dict = {key: variable_pump_SCADA_data_dict[value] for key, value in globals.variable_pumps_id.items()} + + for variable_pump_name, value in variable_pump_dict.items(): + if value and float(value) != 0: + pump_pattern = get_pattern(name_c, get_pump(name_c, fixed_pump_name)['pattern']) + pump_pattern['factors'][modify_index] = float(value) / 50 + cs = ChangeSet() + cs.append(pump_pattern) + set_pattern(name_c, cs) + + if globals.demand_id: + # 基于实时数据,修改大用户节点的pattern + demand_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.demand_id.values()), 
query_time=modify_pattern_start_time) + + demand_dict = {key: demand_SCADA_data_dict[value] for key, value in globals.demand_id.items()} + + for demand_name, value in demand_dict.items(): + if value and float(value) != 0: + demand_pattern = get_pattern(name_c, get_demand(name_c, demand_name)['pattern']) + if get_option(name_c)['UNITS'] == 'LPS': + demand_pattern['factors'][modify_index] = float(value) / 3.6 # m3/h 转换为 L/s + elif get_option(name_c)['UNITS'] == 'CMH': + demand_pattern['factors'][modify_index] = float(value) + cs = ChangeSet() + cs.append(demand_pattern) + set_pattern(name_c, cs) + + # 水质、压力实时数据使用方法待补充 + ############################# + + if globals.source_outflow_pattern_id: + # 基于实时的出厂流量计数据,修改出厂流量计绑定的pattern + source_outflow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.source_outflow_pattern_id.values()), query_time=modify_pattern_start_time) + print(source_outflow_SCADA_data_dict) + + source_outflow_dict = {key: source_outflow_SCADA_data_dict[value] for key, value in globals.source_outflow_pattern_id.items()} + print(source_outflow_dict) + + for pattern_name in source_outflow_dict.keys(): + print(pattern_name) + history_source_outflow_list = get_history_pattern_info(name_c, pattern_name) + history_source_outflow = history_source_outflow_list[modify_index] + print(source_outflow_dict[pattern_name]) + realtime_source_outflow = float(source_outflow_dict[pattern_name]) + + multiply_factor = realtime_source_outflow / history_source_outflow + + pattern = get_pattern(name_c, pattern_name) + pattern['factors'][modify_index] *= multiply_factor + cs = ChangeSet() + cs.append(pattern) + set_pattern(name_c, cs) + + if globals.realtime_pipe_flow_pattern_id: + # 基于实时的pipe_flow类数据,修改pipe_flow类绑定的pattern + realtime_pipe_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(globals.realtime_pipe_flow_pattern_id.values()), query_time=modify_pattern_start_time) + + 
realtime_pipe_flow_dict = {key: realtime_pipe_flow_SCADA_data_dict[value] for key, value in globals.realtime_pipe_flow_pattern_id.items()} + + for pattern_name in realtime_pipe_flow_dict.keys(): + history_pipe_flow_list = get_history_pattern_info(name_c, pattern_name) + history_pipe_flow = history_pipe_flow_list[modify_index] + + realtime_pipe_flow = float(realtime_pipe_flow_dict[pattern_name]) + + multiply_factor = realtime_pipe_flow / history_pipe_flow + + pattern = get_pattern(name_c, pattern_name) + pattern['factors'][modify_index] *= multiply_factor + cs = ChangeSet() + cs.append(pattern) + set_pattern(name_c, cs) + + if globals.pipe_flow_region_patterns: + # 基于实时的pipe_flow类数据,修改pipe_flow分区流量计范围内的non_realtime的demand绑定的pattern + temp_realtime_pipe_flow_pattern_id = {} + # 遍历 pipe_flow_region_patterns 字典的 key + for pipe_flow_region, demand_patterns in globals.pipe_flow_region_patterns.items(): + # 获取对应的实时值 + query_api_id = globals.realtime_pipe_flow_pattern_id.get(pipe_flow_region) + temp_realtime_pipe_flow_pattern_id[pipe_flow_region] = query_api_id + + temp_realtime_pipe_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=list(temp_realtime_pipe_flow_pattern_id.values()), query_time=modify_pattern_start_time) + + temp_realtime_pipe_flow_dict = {key: temp_realtime_pipe_flow_SCADA_data_dict[value] for key, value in temp_realtime_pipe_flow_pattern_id.items()} + + for pattern_name in temp_realtime_pipe_flow_dict.keys(): + temp_history_pipe_flow_list = get_history_pattern_info(name_c, pattern_name) + temp_history_pipe_flow = temp_history_pipe_flow_list[modify_index] + + temp_realtime_pipe_flow = float(temp_realtime_pipe_flow_dict[pattern_name]) + + temp_multiply_factor = temp_realtime_pipe_flow / temp_history_pipe_flow + + temp_non_realtime_demand_pattern_list = globals.pipe_flow_region_patterns[pattern_name] + for demand_pattern_name in temp_non_realtime_demand_pattern_list: + pattern = get_pattern(name_c, 
demand_pattern_name) + pattern['factors'][modify_index] *= temp_multiply_factor + cs = ChangeSet() + cs.append(pattern) + set_pattern(name_c, cs) + + if globals.source_outflow_region: + # 根据associated_source_outflow_id进行分区,各分区用(出厂的流量计 - 实时的pipe_flow和demand)进行数据更新 + for region in globals.source_outflow_region.keys(): + temp_source_outflow_region_id = globals.source_outflow_region_id.get(region, []) + temp_realtime_region_pipe_flow_and_demand_id = globals.realtime_region_pipe_flow_and_demand_id.get(region, []) + temp_source_outflow_region_patterns = globals.source_outflow_region_patterns.get(region, []) + temp_realtime_region_pipe_flow_and_demand_patterns = globals.realtime_region_pipe_flow_and_demand_patterns.get(region, []) + temp_non_realtime_region_patterns = globals.non_realtime_region_patterns.get(region, []) + + region_source_outflow_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=temp_source_outflow_region_id, query_time=modify_pattern_start_time) + + region_realtime_region_pipe_flow_and_demand_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + query_ids_list=temp_realtime_region_pipe_flow_and_demand_id, query_time=modify_pattern_start_time) + + region_total_source_outflow = sum(float(value) for value in region_source_outflow_data_dict.values()) + history_region_total_source_outflow = 0 + for source_outflow_pattern_name in temp_source_outflow_region_patterns: + temp_history_source_outflow_list = get_history_pattern_info(name_c, source_outflow_pattern_name) + history_region_total_source_outflow += temp_history_source_outflow_list[modify_index] + + region_total_realtime_region_pipe_flow_and_demand = sum(float(value) for value in region_realtime_region_pipe_flow_and_demand_data_dict.values()) + history_region_total_realtime_region_pipe_flow_and_demand = 0 + for pipe_flow_and_demand_pattern_name in temp_realtime_region_pipe_flow_and_demand_patterns: + temp_history_pipe_flow_and_demand_list = 
get_history_pattern_info(name_c, pipe_flow_and_demand_pattern_name) + history_region_total_realtime_region_pipe_flow_and_demand += temp_history_pipe_flow_and_demand_list[modify_index] + + temp_multiply_factor = (region_total_source_outflow - region_total_realtime_region_pipe_flow_and_demand) / (history_region_total_source_outflow - history_region_total_realtime_region_pipe_flow_and_demand) + for non_realtime_region_pattern_name in temp_non_realtime_region_patterns: + pattern = get_pattern(name_c, non_realtime_region_pattern_name) + pattern['factors'][modify_index] *= temp_multiply_factor + cs = ChangeSet() + cs.append(pattern) + set_pattern(name_c, cs) + + # 根据高压出厂流量,更改高压用水模式 + # hp_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + # query_ids_list=list(hp_flow_pattern_id.values()), query_time=modify_pattern_start_time) + # + # hp_flow_dict = {key: hp_flow_SCADA_data_dict[value] for key, value in hp_flow_pattern_id.items()} + # + # all_valid = all(value and float(value) != 0 for value in hp_flow_dict.values()) + # + # if all_valid: + # hp_total_SCADA_flow = sum(float(value) for value in hp_flow_dict.values()) + # hp_total_history_flow = 0 + # for pattern_name in hp_flow_dict.keys(): + # history_flow_list = get_history_pattern_info(name_c, pattern_name) + # hp_total_history_flow += history_flow_list[modify_index] + # + # multiply_factor1 = hp_total_SCADA_flow / hp_total_history_flow + # hp_pattern_list = regions_patterns['hp'] + # for pattern_name in hp_pattern_list: + # pattern = get_pattern(name_c, pattern_name) + # pattern['factors'][modify_index] *= multiply_factor1 + # cs = ChangeSet() + # cs.append(pattern) + # set_pattern(name_c, cs) + # + # # 根据低压出厂流量,更改低压用水模式 + # lp_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time( + # query_ids_list=list(lp_flow_pattern_id.values()), query_time=modify_pattern_start_time) + # + # lp_flow_dict = {key: lp_flow_SCADA_data_dict[value] for key, value in 
    #     lp_flow_pattern_id.items()}
    #
    # all_valid2 = all(value and float(value) != 0 for value in lp_flow_dict.values())
    #
    # if all_valid2:
    #     lp_total_SCADA_flow = sum(float(value) for value in lp_flow_dict.values())
    #     lp_total_history_flow = 0
    #     for pattern_name in lp_flow_dict.keys():
    #         history_flow_list = get_history_pattern_info(name_c, pattern_name)
    #         lp_total_history_flow += history_flow_list[modify_index]
    #
    #     multiply_factor2 = lp_total_SCADA_flow / lp_total_history_flow
    #     lp_pattern_list = regions_patterns['lp']
    #     for pattern_name in lp_pattern_list:
    #         pattern = get_pattern(name_c, pattern_name)
    #         pattern['factors'][modify_index] *= multiply_factor2
    #         cs = ChangeSet()
    #         cs.append(pattern)
    #         set_pattern(name_c, cs)

    # Run the hydraulic simulation and report how long it took (Beijing time).
    result = run_project(name_c)

    time_cost_end = time.perf_counter()
    print('{} -- Hydraulic simulation finished, cost time: {:.2f} s.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'),
        time_cost_end - time_cost_start))

    close_project(name_c)

    # Read node and link results from the project's output file — presumably an
    # EPANET toolkit output reader; confirm against the Output class definition.
    output = Output("./temp/{}.db.out".format(name_c))
    node_result = output.node_results()
    link_result = output.link_results()

    # Persist the simulation results to InfluxDB, stamped with the pattern start time.
    influxdb_api.store_realtime_simulation_result_to_influxdb(node_result, link_result, modify_pattern_start_time)


if __name__ == "__main__":
    # Before computing, load scada_info from the PostgreSQL database and build the
    # global lookup tables consumed by run_simulation.
    query_corresponding_element_id_and_query_id("bb")
    query_corresponding_pattern_id_and_query_id('bb')
    region_result = query_non_realtime_region('bb')

    globals.source_outflow_region_id = get_source_outflow_region_id('bb', region_result)
    globals.realtime_region_pipe_flow_and_demand_id = query_realtime_region_pipe_flow_and_demand_id('bb', region_result)
    globals.pipe_flow_region_patterns = query_pipe_flow_region_patterns('bb')

    globals.non_realtime_region_patterns = query_non_realtime_region_patterns('bb', region_result)
    globals.source_outflow_region_patterns, \
        globals.realtime_region_pipe_flow_and_demand_patterns = get_realtime_region_patterns('bb', globals.source_outflow_region_id, globals.realtime_region_pipe_flow_and_demand_id)

    # Print the dictionaries to verify their contents (disabled):
    # print("Reservoirs ID:", globals.reservoirs_id)
    # print("Tanks ID:", globals.tanks_id)
    # print("Fixed Pumps ID:", globals.fixed_pumps_id)
    # print("Variable Pumps ID:", globals.variable_pumps_id)
    # print("Pressure ID:", globals.pressure_id)
    # print("Demand ID:", globals.demand_id)
    # print("Quality ID:", globals.quality_id)
    # print("Source Outflow Pattern ID:", globals.source_outflow_pattern_id)
    # print("Realtime Pipe Flow Pattern ID:", globals.realtime_pipe_flow_pattern_id)
    # print("Pipe Flow Region Patterns:", globals.pipe_flow_region_patterns)
    # print("Source Outflow Region:", region_result)
    # print('Source Outflow Region ID:', globals.source_outflow_region_id)
    # print('Source Outflow Region Patterns:', globals.source_outflow_region_patterns)
    # print("Non Realtime Region Patterns:", globals.non_realtime_region_patterns)
    # print("Realtime Region Pipe Flow And Demand ID:", globals.realtime_region_pipe_flow_and_demand_id)
    # print("Realtime Region Pipe Flow And Demand Patterns:", globals.realtime_region_pipe_flow_and_demand_patterns)

    run_simulation(name='bb', simulation_type="realtime", modify_pattern_start_time='2025-02-07T22:15:00+08:00')