From 096c7b6475e7fa174793c9316795a8bbdb2e6f74 Mon Sep 17 00:00:00 2001
From: JIANG
Date: Fri, 31 Oct 2025 17:16:36 +0800
Subject: [PATCH] Implement the methods involved in the API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 influxdb_api.py | 2985 +++++++++++++++++++++++++++++++----------------
 main.py         |    6 +-
 2 files changed, 1977 insertions(+), 1014 deletions(-)

diff --git a/influxdb_api.py b/influxdb_api.py
index 69480e9..b73eefa 100644
--- a/influxdb_api.py
+++ b/influxdb_api.py
@@ -1,4 +1,13 @@
-from influxdb_client import InfluxDBClient, BucketsApi, WriteApi, OrganizationsApi, Point, QueryApi, WriteOptions, DeleteApi
+from influxdb_client import (
+    InfluxDBClient,
+    BucketsApi,
+    WriteApi,
+    OrganizationsApi,
+    Point,
+    QueryApi,
+    WriteOptions,
+    DeleteApi,
+)
 from typing import List, Dict
 from datetime import datetime, timedelta, timezone
 from influxdb_client.client.write_api import SYNCHRONOUS, ASYNCHRONOUS
@@ -24,7 +33,10 @@ import time_api
 url = influxdb_info.url
 token = influxdb_info.token
 org_name = influxdb_info.org
-client = InfluxDBClient(url=url, token=token, org=org_name, timeout=600*1000)  # 600 seconds
+client = InfluxDBClient(
+    url=url, token=token, org=org_name, timeout=600 * 1000
+)  # 600 seconds
+
 
 def query_pg_scada_info_realtime(name: str) -> None:
     """
@@ -38,11 +50,13 @@ def query_pg_scada_info_realtime(name: str) -> None:
         with psycopg.connect(conn_string) as conn:
             with conn.cursor() as cur:
                 # Query records whose transmission_mode is 'realtime'
-                cur.execute("""
+                cur.execute(
+                    """
                     SELECT type, api_query_id
                     FROM scada_info
                     WHERE transmission_mode = 'realtime';
-                """)
+                    """
+                )
                 records = cur.fetchall()
                 # Clear the global lists
                 globals.reservoir_liquid_level_realtime_ids.clear()
@@ -59,7 +73,9 @@ def query_pg_scada_info_realtime(name: str) -> None:
                     record_type, api_query_id = record
                     if api_query_id is not None:  # make sure api_query_id is not None
                         if record_type == "reservoir_liquid_level":
-                            globals.reservoir_liquid_level_realtime_ids.append(api_query_id)
+                            globals.reservoir_liquid_level_realtime_ids.append(
+                                api_query_id
+                            )
                         elif record_type == "tank_liquid_level":
                             globals.tank_liquid_level_realtime_ids.append(api_query_id)
                         elif record_type == "fixed_pump":
@@ -102,20 +118,22 @@ def query_pg_scada_info_non_realtime(name: str) -> None:
     close_project(name)
     open_project(name)
     dic_time = get_time(name)
-    globals.hydraulic_timestep = dic_time['HYDRAULIC TIMESTEP']
+    globals.hydraulic_timestep = dic_time["HYDRAULIC TIMESTEP"]
     # DingZQ, 2025-03-21
-    #close_project(name)
+    # close_project(name)
     # Connect to the database
     conn_string = f"dbname={name} host=127.0.0.1"
     try:
         with psycopg.connect(conn_string) as conn:
             with conn.cursor() as cur:
                 # Query records whose transmission_mode is 'non_realtime'
-                cur.execute("""
+                cur.execute(
+                    """
                     SELECT type, api_query_id, transmission_frequency
                     FROM scada_info
                     WHERE transmission_mode = 'non_realtime';
-                """)
+                    """
+                )
                 records = cur.fetchall()
                 # Clear the global lists
                 globals.reservoir_liquid_level_non_realtime_ids.clear()
@@ -133,7 +151,9 @@ def query_pg_scada_info_non_realtime(name: str) -> None:
                     record_type, api_query_id, freq = record
                     if api_query_id is not None:  # make sure api_query_id is not None
                         if record_type == "reservoir_liquid_level":
-                            globals.reservoir_liquid_level_non_realtime_ids.append(api_query_id)
+                            globals.reservoir_liquid_level_non_realtime_ids.append(
+                                api_query_id
+                            )
                         elif record_type == "fixed_pump":
                             globals.fixed_pump_non_realtime_ids.append(api_query_id)
                         elif record_type == "variable_pump":
@@ -152,7 +172,9 @@ def query_pg_scada_info_non_realtime(name: str) -> None:
                     if freq is not None:
                         transmission_frequencies.append(freq)
                 # Compute the maximum transmission_frequency
-                globals.transmission_frequency = max(transmission_frequencies) if transmission_frequencies else None
+                globals.transmission_frequency = (
+                    max(transmission_frequencies) if transmission_frequencies else None
+                )
                 # Print the results for easier debugging
                 # print("Query completed. Results:")
                 # print("Reservoir Liquid Level Non-Realtime IDs:", globals.reservoir_liquid_level_non_realtime_ids)
@@ -172,28 +194,26 @@ def query_pg_scada_info_non_realtime(name: str) -> None:
 # 2025/03/23
 def get_new_client() -> InfluxDBClient:
     """Return a new InfluxDBClient instance on every call."""
-    return InfluxDBClient(url=url,
-                          token=token,
-                          org=org_name,
-                          enable_gzip=True,
-                          timeout=600*1000)  # 600 seconds
+    return InfluxDBClient(
+        url=url, token=token, org=org_name, enable_gzip=True, timeout=600 * 1000
+    )  # 600 seconds
 
-# 2025/04/11, DingZQ 
+
+# 2025/04/11, DingZQ
 def create_write_options() -> WriteOptions:
-    '''
+    """
     Create a set of write options
-    '''
+    """
     return WriteOptions(
-        jitter_interval=200,      # add jitter to avoid simultaneous writes
-        max_retry_delay=30000,    # maximum retry delay (milliseconds)
-        max_retries=5,            # maximum number of retries (0 means no retries)
-        batch_size=10_000,        # send 10,000 points per batch
-        flush_interval=10_000,    # force a flush every 10 seconds
-        retry_interval=5_000      # wait 5 seconds between retries after a failure
+        jitter_interval=200,  # add jitter to avoid simultaneous writes
+        max_retry_delay=30000,  # maximum retry delay (milliseconds)
+        max_retries=5,  # maximum number of retries (0 means no retries)
+        batch_size=10_000,  # send 10,000 points per batch
+        flush_interval=10_000,  # force a flush every 10 seconds
+        retry_interval=5_000,  # wait 5 seconds between retries after a failure
     )
 
-
 # 2025/02/01
 def delete_buckets(org_name: str) -> None:
     """
@@ -203,11 +223,15 @@ def delete_buckets(org_name: str) -> None:
     """
    client = get_new_client()
     # Define the list of bucket names to delete
-    buckets_to_delete = ['SCADA_data', 'realtime_simulation_result', 'scheme_simulation_result']
+    buckets_to_delete = [
+        "SCADA_data",
+        "realtime_simulation_result",
+        "scheme_simulation_result",
+    ]
     buckets_api = client.buckets_api()
     buckets_obj = buckets_api.find_buckets(org=org_name)
     # Make sure buckets_obj has a buckets attribute
-    if hasattr(buckets_obj, 'buckets'):
+    if hasattr(buckets_obj, "buckets"):
         for bucket in buckets_obj.buckets:
             if bucket.name in buckets_to_delete:  # only delete buckets with these specific names
                 try:
@@ -231,7 +255,11 @@ def create_and_initialize_buckets(org_name: str) -> None:
     """
     client = get_new_client()
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
 
     # Delete the existing buckets first, then initialize
     # delete_buckets(org_name)
@@ -253,7 +281,7 @@ def create_and_initialize_buckets(org_name: str) -> None:
     write_api = client.write_api(
         write_options=WriteOptions(batch_size=1000, flush_interval=1000),
         success_callback=success_callback,
-        error_callback=error_callback
+        error_callback=error_callback,
     )
     org_api = OrganizationsApi(client)
     # Get the org_id
@@ -266,7 +294,7 @@ def create_and_initialize_buckets(org_name: str) -> None:
     buckets = [
         {"name": "SCADA_data", "retention_rules": []},
         {"name": "realtime_simulation_result", "retention_rules": []},
-        {"name": "scheme_simulation_result", "retention_rules": []}
+        {"name": "scheme_simulation_result", "retention_rules": []},
     ]
     # Create a temporary list to hold the data points
     points_to_write = []
@@ -276,89 +304,103 @@
     for bucket in buckets:
         try:
             created_bucket = bucket_api.create_bucket(
                 bucket_name=bucket["name"],
                 retention_rules=bucket["retention_rules"],
-                org_id=org_id
+                org_id=org_id,
             )
             print(f"Bucket '{bucket['name']}' created with ID: {created_bucket.id}")
             # Initialize data according to the bucket
             if bucket["name"] == "SCADA_data":
-                point = Point("SCADA") \
-                    .tag("date", None) \
-                    .tag("description", None) \
-                    .tag("device_ID", None) \
-                    .field("monitored_value", 0.0) \
-                    .field("datacleaning_value", 0.0) \
-                    .field("simulation_value", 0.0) \
-                    .time("2024-11-21T00:00:00Z", write_precision='s')
+                point = (
+                    Point("SCADA")
+                    .tag("date", None)
+                    .tag("description", None)
+                    .tag("device_ID", None)
+                    .field("monitored_value", 0.0)
+                    .field("datacleaning_value", 0.0)
+                    .field("simulation_value", 0.0)
+                    .time("2024-11-21T00:00:00Z", write_precision="s")
+                )
                 points_to_write.append(point)
                 # write_api.write(bucket="SCADA_data", org=org_name, record=point)
                 # print("Initialized SCADA_data with default structure.")
-            elif bucket["name"] == "realtime_simulation_result":  # realtime_simulation_result
-                link_point = Point("link") \
-                    .tag("date", None) \
-                    .tag("ID", None) \
-                    .field("flow", 0.0) \
-                    .field("leakage", 0.0) \
-                    .field("velocity", 0.0) \
-                    .field("headloss", 0.0) \
-                    .field("status", None) \
-                    .field("setting", 0.0) \
-                    .field("quality", 0.0) \
-                    .field("reaction", 0.0) \
-                    .field("friction", 0.0) \
-                    .time("2024-11-21T00:00:00Z", write_precision='s')
+            elif (
+                bucket["name"] == "realtime_simulation_result"
+            ):  # realtime_simulation_result
+                link_point = (
+                    Point("link")
+                    .tag("date", None)
+                    .tag("ID", None)
+                    .field("flow", 0.0)
+                    .field("leakage", 0.0)
+                    .field("velocity", 0.0)
+                    .field("headloss", 0.0)
+                    .field("status", None)
+                    .field("setting", 0.0)
+                    .field("quality", 0.0)
+                    .field("reaction", 0.0)
+                    .field("friction", 0.0)
+                    .time("2024-11-21T00:00:00Z", write_precision="s")
+                )
                 points_to_write.append(link_point)
-                node_point = Point("node") \
-                    .tag("date", None) \
-                    .tag("ID", None) \
-                    .field("head", 0.0) \
-                    .field("pressure", 0.0) \
-                    .field("actualdemand", 0.0) \
-                    .field("demanddeficit", 0.0) \
-                    .field("totalExternalOutflow", 0.0) \
-                    .field("quality", 0.0) \
-                    .time("2024-11-21T00:00:00Z", write_precision='s')
+                node_point = (
+                    Point("node")
+                    .tag("date", None)
+                    .tag("ID", None)
+                    .field("head", 0.0)
+                    .field("pressure", 0.0)
+                    .field("actualdemand", 0.0)
+                    .field("demanddeficit", 0.0)
+                    .field("totalExternalOutflow", 0.0)
+                    .field("quality", 0.0)
+                    .time("2024-11-21T00:00:00Z", write_precision="s")
+                )
                 points_to_write.append(node_point)
                 # write_api.write(bucket="realtime_simulation_result", org=org_name, record=link_point)
                 # write_api.write(bucket="realtime_simulation_result", org=org_name, record=node_point)
                 # print("Initialized realtime_simulation_result with default structure.")
             elif bucket["name"] == "scheme_simulation_result":
-                link_point = Point("link") \
-                    .tag("date", None) \
-                    .tag("ID", None) \
-                    .tag("scheme_Type", None) \
-                    .tag("scheme_Name", None) \
-                    .field("flow", 0.0) \
-                    .field("leakage", 0.0) \
-                    .field("velocity", 0.0) \
-                    .field("headloss", 0.0) \
-                    .field("status", None) \
-                    .field("setting", 0.0) \
-                    .field("quality", 0.0) \
-                    .time("2024-11-21T00:00:00Z", write_precision='s')
+                link_point = (
+                    Point("link")
+                    .tag("date", None)
+                    .tag("ID", None)
+                    .tag("scheme_Type", None)
+                    .tag("scheme_Name", None)
+                    .field("flow", 0.0)
+                    .field("leakage", 0.0)
+                    .field("velocity", 0.0)
+                    .field("headloss", 0.0)
+                    .field("status", None)
+                    .field("setting", 0.0)
+                    .field("quality", 0.0)
+                    .time("2024-11-21T00:00:00Z", write_precision="s")
+                )
                 points_to_write.append(link_point)
-                node_point = Point("node") \
-                    .tag("date", None) \
-                    .tag("ID", None) \
-                    .tag("scheme_Type", None) \
-                    .tag("scheme_Name", None) \
-                    .field("head", 0.0) \
-                    .field("pressure", 0.0) \
-                    .field("actualdemand", 0.0) \
-                    .field("demanddeficit", 0.0) \
-                    .field("totalExternalOutflow", 0.0) \
-                    .field("quality", 0.0) \
-                    .time("2024-11-21T00:00:00Z", write_precision='s')
+                node_point = (
+                    Point("node")
+                    .tag("date", None)
+                    .tag("ID", None)
+                    .tag("scheme_Type", None)
+                    .tag("scheme_Name", None)
+                    .field("head", 0.0)
+                    .field("pressure", 0.0)
+                    .field("actualdemand", 0.0)
+                    .field("demanddeficit", 0.0)
+                    .field("totalExternalOutflow", 0.0)
+                    .field("quality", 0.0)
+                    .time("2024-11-21T00:00:00Z", write_precision="s")
+                )
                 points_to_write.append(node_point)
-                SCADA_point = Point("SCADA") \
-                    .tag("date", None) \
-                    .tag("description", None) \
-                    .tag("device_ID", None) \
-                    .tag("scheme_Type", None) \
-                    .tag("scheme_Name", None) \
-                    .field("monitored_value", 0.0) \
-                    .field("datacleaning_value", 0.0) \
-                    .field("scheme_simulation_value", 0.0) \
-                    .time("2024-11-21T00:00:00Z", write_precision='s')
+                SCADA_point = (
+                    Point("SCADA")
+                    .tag("date", None)
+                    .tag("description", None)
+                    .tag("device_ID", None)
+                    .tag("scheme_Type", None)
+                    .tag("scheme_Name", None)
+                    .field("monitored_value", 0.0)
+                    .field("datacleaning_value", 0.0)
+                    .field("scheme_simulation_value", 0.0)
+                    .time("2024-11-21T00:00:00Z", write_precision="s")
+                )
                 points_to_write.append(SCADA_point)
                 # write_api.write(bucket="scheme_simulation_result", org=org_name, record=link_point)
                 # write_api.write(bucket="scheme_simulation_result", org=org_name, record=node_point)
@@ -375,7 +417,9 @@ def create_and_initialize_buckets(org_name: str) -> None:
     client.close()
 
 
-def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str = "SCADA_data") -> None:
+def store_realtime_SCADA_data_to_influxdb(
+    get_real_value_time: str, bucket: str = "SCADA_data"
+) -> None:
     """
     Import SCADA data into the database through the data API
     :param get_real_value_time: the time at which the data is fetched, e.g. '2024-11-25T09:00:00+08:00'
@@ -384,7 +428,11 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str
     """
     client = get_new_client()
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
 
     # Local variable recording how many data points were written successfully
     points_written = 0
@@ -404,7 +452,7 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str
     write_api = client.write_api(
         write_options=create_write_options(),
         success_callback=success_callback,
-        error_callback=error_callback
+        error_callback=error_callback,
     )

     # Create a temporary list to hold the data points
     points_to_write = []
@@ -413,11 +461,11 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str
     try_count = 0
     reservoir_liquid_level_realtime_data_list = []
     tank_liquid_level_realtime_data_list = []
-    fixed_pump_realtime_data_list =[]
-    variable_pump_realtime_data_list =[]
+    fixed_pump_realtime_data_list = []
+    variable_pump_realtime_data_list = []
     source_outflow_realtime_data_list = []
     pipe_flow_realtime_data_list = []
-    pressure_realtime_data_list =[]
+    pressure_realtime_data_list = []
     demand_realtime_data_list = []
     quality_realtime_data_list = []
     while try_count <= 5:  # try up to 6 times
@@ -426,24 +474,41 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str
         try:
             try_count += 1
             if globals.reservoir_liquid_level_realtime_ids:
                 # print(globals.reservoir_liquid_level_realtime_ids)
                 reservoir_liquid_level_realtime_data_list = get_realValue.get_realValue(
-                    ids=','.join(globals.reservoir_liquid_level_realtime_ids))
+ ids=",".join(globals.reservoir_liquid_level_realtime_ids) + ) # print(reservoir_liquid_level_realtime_data_list) if globals.tank_liquid_level_realtime_ids: - tank_liquid_level_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.tank_liquid_level_realtime_ids)) + tank_liquid_level_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.tank_liquid_level_realtime_ids) + ) if globals.fixed_pump_realtime_ids: - fixed_pump_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.fixed_pump_realtime_ids)) + fixed_pump_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.fixed_pump_realtime_ids) + ) if globals.variable_pump_realtime_ids: - variable_pump_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.variable_pump_realtime_ids)) + variable_pump_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.variable_pump_realtime_ids) + ) if globals.source_outflow_realtime_ids: - source_outflow_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.source_outflow_realtime_ids)) + source_outflow_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.source_outflow_realtime_ids) + ) if globals.pipe_flow_realtime_ids: - pipe_flow_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.pipe_flow_realtime_ids)) + pipe_flow_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.pipe_flow_realtime_ids) + ) if globals.pressure_realtime_ids: - pressure_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.pressure_realtime_ids)) + pressure_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.pressure_realtime_ids) + ) if globals.demand_realtime_ids: - demand_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.demand_realtime_ids)) + demand_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.demand_realtime_ids) + ) if globals.quality_realtime_ids: - quality_realtime_data_list = get_realValue.get_realValue(ids=','.join(globals.quality_realtime_ids)) + quality_realtime_data_list = get_realValue.get_realValue( + ids=",".join(globals.quality_realtime_ids) + ) except Exception as e: print(e) time.sleep(10) @@ -453,8 +518,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if reservoir_liquid_level_realtime_data_list: for data in reservoir_liquid_level_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -463,17 +530,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('reservoir_liquid_level_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("reservoir_liquid_level_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + 
.tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -481,8 +551,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if tank_liquid_level_realtime_data_list: for data in tank_liquid_level_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -491,17 +563,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('tank_liquid_level_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("tank_liquid_level_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -509,8 +584,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if fixed_pump_realtime_data_list: for data in fixed_pump_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -519,17 +596,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('fixed_pump_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("fixed_pump_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + 
.time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -537,27 +617,32 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if variable_pump_realtime_data_list: for data in variable_pump_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) time_difference = abs((data_time - get_real_value_time_dt).total_seconds()) # 判断时间差是否超过1分钟 - if time_difference > 60: # 超过1分钟 + if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('variable_pump_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("variable_pump_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -565,8 +650,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if source_outflow_realtime_data_list: for data in source_outflow_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -575,17 +662,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('source_outflow_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("source_outflow_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -593,8 +683,10 @@ def 
store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if pipe_flow_realtime_data_list: for data in pipe_flow_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -603,17 +695,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('pipe_flow_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("pipe_flow_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -621,8 +716,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if pressure_realtime_data_list: for data in pressure_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -631,17 +728,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('pressure_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("pressure_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -649,8 +749,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if demand_realtime_data_list: for data in demand_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = 
datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -659,17 +761,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('demand_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("demand_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -677,8 +782,10 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if quality_realtime_data_list: for data in quality_realtime_data_list: # 将 data['time'] 和 get_realValue_time 转换为 datetime 对象 - data_time = datetime.fromisoformat(data['time']) - get_real_value_time_dt = datetime.fromisoformat(get_real_value_time).replace(tzinfo=None) + data_time = datetime.fromisoformat(data["time"]) + get_real_value_time_dt = datetime.fromisoformat( + get_real_value_time + ).replace(tzinfo=None) # 将获取的时间转换为 UTC 时间 get_real_value_time_utc = get_real_value_time_dt.astimezone(timezone.utc) # 计算时间差(绝对值) @@ -687,17 +794,20 @@ def store_realtime_SCADA_data_to_influxdb(get_real_value_time: str, bucket: str if time_difference > 60: # 超过1分钟 monitored_value = None else: # 小于等于3分钟 - monitored_value = float(data['monitored_value']) + monitored_value = float(data["monitored_value"]) # 创建Point对象 point = ( - Point('quality_realtime') - .tag("date", datetime.fromisoformat(get_real_value_time).strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) + Point("quality_realtime") + .tag( + "date", + datetime.fromisoformat(get_real_value_time).strftime("%Y-%m-%d"), + ) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) .field("monitored_value", monitored_value) .field("datacleaning_value", None) .field("simulation_value", None) - .time(get_real_value_time_utc, write_precision='s') + .time(get_real_value_time_utc, write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -719,13 +829,15 @@ def convert_time_format(original_time: str) -> str: :param original_time: str, “2024-04-13T08:00:00+08:00"格式的时间 :return: str,“2024-04-13 08:00:00”格式的时间 """ - new_time = original_time.replace('T', ' ') - new_time = new_time.replace('+08:00', '') + new_time = original_time.replace("T", " ") + new_time = new_time.replace("+08:00", "") return new_time # 2025/01/10 -def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bucket: str = "SCADA_data") -> None: +def store_non_realtime_SCADA_data_to_influxdb( + get_history_data_end_time: str, bucket: str = "SCADA_data" 
+) -> None:
     """
     Fetch the SCADA data transmitted back within a given time window
     :param get_history_data_end_time: the end time for fetching historical data, e.g. '2024-11-25T09:00:00+08:00'
@@ -734,7 +846,11 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
     """
     client = get_new_client()
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
 
     # Local variable recording how many data points were written successfully
     points_written = 0
@@ -758,19 +874,23 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
     write_api = client.write_api(
         write_options=create_write_options(),
         success_callback=success_callback,
-        error_callback=error_callback
+        error_callback=error_callback,
     )
 
     # Create a temporary list to hold the data points
     points_to_write = []
     # Convert the end_date string to a datetime object
-    end_date_dt = datetime.strptime(convert_time_format(get_history_data_end_time), '%Y-%m-%d %H:%M:%S')
-    end_date = end_date_dt.strftime('%Y-%m-%d %H:%M:%S')
+    end_date_dt = datetime.strptime(
+        convert_time_format(get_history_data_end_time), "%Y-%m-%d %H:%M:%S"
+    )
+    end_date = end_date_dt.strftime("%Y-%m-%d %H:%M:%S")
     # Convert the transmission_frequency string to a timedelta object
-    transmission_frequency_dt = datetime.strptime(globals.transmission_frequency, '%H:%M:%S') - datetime(1900, 1, 1)
+    transmission_frequency_dt = datetime.strptime(
+        globals.transmission_frequency, "%H:%M:%S"
+    ) - datetime(1900, 1, 1)
     get_history_data_start_time = end_date_dt - transmission_frequency_dt
-    begin_date = get_history_data_start_time.strftime('%Y-%m-%d %H:%M:%S')
+    begin_date = get_history_data_start_time.strftime("%Y-%m-%d %H:%M:%S")
     # print(begin_date)
     # print(end_date)
     reservoir_liquid_level_non_realtime_data_list = []
@@ -789,52 +909,72 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
             # reservoir_liquid_level_non_realtime_data_list = get_data.get_history_data(
             #     ids=','.join(reservoir_liquid_level_non_realtime_ids), begin_date=begin_date, end_date=end_date, downsample='1m')
             if globals.reservoir_liquid_level_non_realtime_ids:
-                reservoir_liquid_level_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.reservoir_liquid_level_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                reservoir_liquid_level_non_realtime_data_list = (
+                    get_data.get_history_data(
+                        ids=",".join(globals.reservoir_liquid_level_non_realtime_ids),
+                        begin_date=begin_date,
+                        end_date=end_date,
+                        downsample="1m",
+                    )
+                )
             if globals.tank_liquid_level_non_realtime_ids:
                 tank_liquid_level_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.tank_liquid_level_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.tank_liquid_level_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.fixed_pump_non_realtime_ids:
                 fixed_pump_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.fixed_pump_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.fixed_pump_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.variable_pump_non_realtime_ids:
                 variable_pump_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.variable_pump_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.variable_pump_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.source_outflow_non_realtime_ids:
                 source_outflow_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.source_outflow_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.source_outflow_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.pipe_flow_non_realtime_ids:
                 pipe_flow_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.pipe_flow_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.pipe_flow_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
                 # print(pipe_flow_non_realtime_data_list)
             if globals.pressure_non_realtime_ids:
                 pressure_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.pressure_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.pressure_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
                 # print(pressure_non_realtime_data_list)
             if globals.demand_non_realtime_ids:
                 demand_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.demand_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.demand_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.quality_non_realtime_ids:
                 quality_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.quality_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.quality_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
         except Exception as e:
             print(f"Attempt {try_count} failed with error: {e}")
             if try_count < 5:
@@ -849,14 +989,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in reservoir_liquid_level_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('reservoir_liquid_level_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("reservoir_liquid_level_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -864,14 +1004,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in tank_liquid_level_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('tank_liquid_level_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("tank_liquid_level_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -879,14 +1019,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in fixed_pump_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('fixed_pump_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("fixed_pump_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -894,14 +1034,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in variable_pump_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('variable_pump_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("variable_pump_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -909,14 +1049,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in source_outflow_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('source_outflow_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("source_outflow_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -924,14 +1064,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in pipe_flow_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('pipe_flow_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("pipe_flow_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -939,14 +1079,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in pressure_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('pressure_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("pressure_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -954,14 +1094,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in demand_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('demand_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("demand_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -969,14 +1109,14 @@ def store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time: str, bu
         for data in quality_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('quality_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("quality_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -994,7 +1134,9 @@
 
 
 # 2025/03/01
-def download_history_data_manually(begin_time: str, end_time: str, bucket: str = "SCADA_data") -> None:
+def download_history_data_manually(
+    begin_time: str, end_time: str, bucket: str = "SCADA_data"
+) -> None:
     """
     Fetch the historical data of all SCADA devices within a given time range; not executed in real time, used for manual data backfill
     :param begin_time: the start time for fetching historical data, e.g. '2024-11-25T09:00:00+08:00'
@@ -1004,7 +1146,11 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
     """
     client = get_new_client()
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
 
     # Local variable recording how many data points were written successfully
     points_written = 0
@@ -1019,6 +1165,7 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
     def error_callback(exception):
         print("Error writing batch:", exception)
 
+
     # write_options = WriteOptions(
     #     jitter_interval=200,   # add jitter to avoid simultaneous writes
     #     max_retry_delay=30000  # maximum retry delay (milliseconds)
     # )
@@ -1028,7 +1175,7 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
     write_api = client.write_api(
         write_options=create_write_options(),
         success_callback=success_callback,
-        error_callback=error_callback
+        error_callback=error_callback,
     )
     # Create a temporary list to hold the data points
     points_to_write = []
@@ -1038,11 +1185,11 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
 
     reservoir_liquid_level_realtime_data_list = []
     tank_liquid_level_realtime_data_list = []
-    fixed_pump_realtime_data_list =[]
-    variable_pump_realtime_data_list =[]
+    fixed_pump_realtime_data_list = []
+    variable_pump_realtime_data_list = []
     source_outflow_realtime_data_list = []
     pipe_flow_realtime_data_list = []
-    pressure_realtime_data_list =[]
+    pressure_realtime_data_list = []
     demand_realtime_data_list = []
     quality_realtime_data_list = []
 
@@ -1062,98 +1209,136 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
             try_count += 1
             if globals.reservoir_liquid_level_realtime_ids:
                 reservoir_liquid_level_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.reservoir_liquid_level_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.reservoir_liquid_level_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.tank_liquid_level_realtime_ids:
                 tank_liquid_level_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.tank_liquid_level_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.tank_liquid_level_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.fixed_pump_realtime_ids:
                 fixed_pump_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.fixed_pump_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.fixed_pump_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.variable_pump_realtime_ids:
                 variable_pump_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.variable_pump_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.variable_pump_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.source_outflow_realtime_ids:
                 source_outflow_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.source_outflow_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.source_outflow_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.pipe_flow_realtime_ids:
                 pipe_flow_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.pipe_flow_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.pipe_flow_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.pressure_realtime_ids:
                 pressure_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.pressure_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.pressure_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.demand_realtime_ids:
                 demand_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.demand_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.demand_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.quality_realtime_ids:
                 quality_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.quality_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.quality_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             # reservoir_liquid_level_non_realtime_data_list = get_data.get_history_data(
             #     ids=','.join(reservoir_liquid_level_non_realtime_ids), begin_date=begin_date, end_date=end_date, downsample='1m')
             if globals.reservoir_liquid_level_non_realtime_ids:
-                reservoir_liquid_level_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.reservoir_liquid_level_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                reservoir_liquid_level_non_realtime_data_list = (
+                    get_data.get_history_data(
+                        ids=",".join(globals.reservoir_liquid_level_non_realtime_ids),
+                        begin_date=begin_date,
+                        end_date=end_date,
+                        downsample="1m",
+                    )
+                )
             if globals.tank_liquid_level_non_realtime_ids:
                 tank_liquid_level_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.tank_liquid_level_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.tank_liquid_level_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.fixed_pump_non_realtime_ids:
                 fixed_pump_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.fixed_pump_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.fixed_pump_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.variable_pump_non_realtime_ids:
                 variable_pump_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.variable_pump_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.variable_pump_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.source_outflow_non_realtime_ids:
                 source_outflow_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.source_outflow_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.source_outflow_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.pipe_flow_non_realtime_ids:
                 pipe_flow_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.pipe_flow_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.pipe_flow_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
                 # print(pipe_flow_non_realtime_data_list)
             if globals.pressure_non_realtime_ids:
                 pressure_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.pressure_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.pressure_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
                 # print(pressure_non_realtime_data_list)
             if globals.demand_non_realtime_ids:
                 demand_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.demand_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.demand_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
             if globals.quality_non_realtime_ids:
                 quality_non_realtime_data_list = get_data.get_history_data(
-                    ids=','.join(globals.quality_non_realtime_ids),
-                    begin_date=begin_date, end_date=end_date,
-                    downsample='1m')
+                    ids=",".join(globals.quality_non_realtime_ids),
+                    begin_date=begin_date,
+                    end_date=end_date,
+                    downsample="1m",
+                )
         except Exception as e:
             print(f"Attempt {try_count} failed with error: {e}")
             if try_count < 5:
@@ -1169,14 +1354,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in reservoir_liquid_level_realtime_data_list:
             # Create a Point object
             point = (
-                Point('reservoir_liquid_level_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("reservoir_liquid_level_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1184,14 +1369,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in tank_liquid_level_realtime_data_list:
             # Create a Point object
             point = (
-                Point('tank_liquid_level_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("tank_liquid_level_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1199,14 +1384,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in fixed_pump_realtime_data_list:
             # Create a Point object
             point = (
-                Point('fixed_pump_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("fixed_pump_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1214,14 +1399,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in variable_pump_realtime_data_list:
             # Create a Point object
             point = (
-                Point('variable_pump_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("variable_pump_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1229,14 +1414,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in source_outflow_realtime_data_list:
             # Create a Point object
            point = (
-                Point('source_outflow_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("source_outflow_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1244,14 +1429,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
        for data in pipe_flow_realtime_data_list:
             # Create a Point object
             point = (
-                Point('pipe_flow_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("pipe_flow_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1259,14 +1444,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in pressure_realtime_data_list:
             # Create a Point object
             point = (
-                Point('pressure_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("pressure_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1274,14 +1459,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in demand_realtime_data_list:
             # Create a Point object
             point = (
-                Point('demand_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("demand_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1289,14 +1474,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in quality_realtime_data_list:
             # Create a Point object
             point = (
-                Point('quality_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("quality_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1304,14 +1489,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in reservoir_liquid_level_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('reservoir_liquid_level_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("reservoir_liquid_level_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1319,14 +1504,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in tank_liquid_level_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('tank_liquid_level_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("tank_liquid_level_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1334,14 +1519,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in fixed_pump_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('fixed_pump_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("fixed_pump_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1349,14 +1534,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in variable_pump_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('variable_pump_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("variable_pump_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1364,14 +1549,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in source_outflow_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('source_outflow_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("source_outflow_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1379,14 +1564,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in pipe_flow_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('pipe_flow_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-                .tag("description", data['description'])
-                .tag("device_ID", data['device_ID'])
-                .field("monitored_value", float(data['monitored_value']))
+                Point("pipe_flow_non_realtime")
+                .tag("date", data["time"].strftime("%Y-%m-%d"))
+                .tag("description", data["description"])
+                .tag("device_ID", data["device_ID"])
+                .field("monitored_value", float(data["monitored_value"]))
                 .field("datacleaning_value", None)
                 .field("simulation_value", None)
-                .time(data['time'], write_precision='s')
+                .time(data["time"], write_precision="s")
             )
             points_to_write.append(point)
             # write_api.write(bucket=bucket, org=org_name, record=point)
@@ -1394,14 +1579,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str =
         for data in pressure_non_realtime_data_list:
             # Create a Point object
             point = (
-                Point('pressure_non_realtime')
-                .tag("date", data['time'].strftime('%Y-%m-%d'))
-
.tag("description", data['description']) - .tag("device_ID", data['device_ID']) - .field("monitored_value", float(data['monitored_value'])) + Point("pressure_non_realtime") + .tag("date", data["time"].strftime("%Y-%m-%d")) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) + .field("monitored_value", float(data["monitored_value"])) .field("datacleaning_value", None) .field("simulation_value", None) - .time(data['time'], write_precision='s') + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -1409,14 +1594,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str = for data in demand_non_realtime_data_list: # 创建Point对象 point = ( - Point('demand_non_realtime') - .tag("date", data['time'].strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) - .field("monitored_value", float(data['monitored_value'])) + Point("demand_non_realtime") + .tag("date", data["time"].strftime("%Y-%m-%d")) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) + .field("monitored_value", float(data["monitored_value"])) .field("datacleaning_value", None) .field("simulation_value", None) - .time(data['time'], write_precision='s') + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -1424,14 +1609,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str = for data in quality_non_realtime_data_list: # 创建Point对象 point = ( - Point('quality_non_realtime') - .tag("date", data['time'].strftime('%Y-%m-%d')) - .tag("description", data['description']) - .tag("device_ID", data['device_ID']) - .field("monitored_value", float(data['monitored_value'])) + Point("quality_non_realtime") + .tag("date", data["time"].strftime("%Y-%m-%d")) + .tag("description", data["description"]) + .tag("device_ID", data["device_ID"]) + .field("monitored_value", float(data["monitored_value"])) .field("datacleaning_value", None) .field("simulation_value", None) - .time(data['time'], write_precision='s') + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -1447,11 +1632,14 @@ def download_history_data_manually(begin_time: str, end_time: str, bucket: str = client.close() + ########################SCADA############################################################################################################ -# DingZQ, 2025-03-08 -def query_all_SCADA_records_by_date(query_date: str, bucket: str="SCADA_data") -> list[dict[str, float]]: +# DingZQ, 2025-03-08 +def query_all_SCADA_records_by_date( + query_date: str, bucket: str = "SCADA_data" +) -> list[dict[str, float]]: """ 根据日期查询所有SCADA数据 @@ -1463,12 +1651,22 @@ def query_all_SCADA_records_by_date(query_date: str, bucket: str="SCADA_data") - """ client = get_new_client() - if client.ping(): print("{} -- Successfully connected to InfluxDB.".format( datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) - else: print("{} -- Failed to connect to InfluxDB.".format( datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + if client.ping(): + print( + "{} -- Successfully connected to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) + else: + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将北京时间转换为 
UTC 时间 - + bg_start_time, bg_end_time = time_api.parse_beijing_date_range(query_date) # bg_end_time = bg_start_time + timedelta(hours=2) # 服务器性能不行,暂时返回2个小时的数据 utc_start_time = bg_start_time.astimezone(timezone.utc) @@ -1480,12 +1678,12 @@ def query_all_SCADA_records_by_date(query_date: str, bucket: str="SCADA_data") - SCADA_results = [] # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_end_time.isoformat()}) |> filter(fn: (r) => r["_field"] == "monitored_value") |> sort(columns: ["_time"], desc: false) - ''' + """ # 执行查询 try: @@ -1499,12 +1697,12 @@ def query_all_SCADA_records_by_date(query_date: str, bucket: str="SCADA_data") - # 获取字段 "_value" 即为 monitored_value monitored_value = record.get_value() rec = { - "ID": record['device_ID'], # 是api_query 而不是 普通的Id - "time": record.get_time(), - record['_measurement']: monitored_value + "ID": record["device_ID"], # 是api_query 而不是 普通的Id + "time": record.get_time(), + record["_measurement"]: monitored_value, } SCADA_results.append(rec) - + except Exception as e: print(f"Error querying InfluxDB for date {query_date}: {e}") @@ -1513,8 +1711,9 @@ def query_all_SCADA_records_by_date(query_date: str, bucket: str="SCADA_data") - return SCADA_results - -def query_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], query_time: str, bucket: str="SCADA_data") -> Dict[str, float]: +def query_SCADA_data_by_device_ID_and_time( + query_ids_list: List[str], query_time: str, bucket: str = "SCADA_data" +) -> Dict[str, float]: """ 根据SCADA设备的ID和时间查询值 :param query_ids_list: SCADA设备ID的列表 @@ -1524,7 +1723,11 @@ def query_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], query_time """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将北京时间转换为 UTC 时间 @@ -1536,11 +1739,11 @@ def query_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], query_time SCADA_result_dict = {} for device_id in query_ids_list: # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "monitored_value") - ''' + """ # 执行查询 try: result = query_api.query(flux_query) @@ -1559,12 +1762,17 @@ def query_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], query_time print(f"Error querying InfluxDB for device ID {device_id}: {e}") SCADA_result_dict[device_id] = None client.close() - + return SCADA_result_dict -def query_scheme_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], query_time: str, scheme_Type: str, - scheme_Name: str, bucket: str="scheme_simulation_result") -> Dict[str, float]: +def query_scheme_SCADA_data_by_device_ID_and_time( + query_ids_list: List[str], + query_time: str, + scheme_Type: str, + scheme_Name: str, + bucket: str = "scheme_simulation_result", +) -> Dict[str, float]: """ 根据SCADA设备的ID和时间查询方案中的值 :param query_ids_list: SCADA设备ID的列表 @@ -1574,7 +1782,11 @@ def query_scheme_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], que """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + 
datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将北京时间转换为 UTC 时间 @@ -1586,11 +1798,11 @@ def query_scheme_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], que SCADA_result_dict = {} for device_id in query_ids_list: # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "monitored_value" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}") - ''' + """ # 执行查询 try: result = query_api.query(flux_query) @@ -1608,17 +1820,23 @@ def query_scheme_SCADA_data_by_device_ID_and_time(query_ids_list: List[str], que except Exception as e: print(f"Error querying InfluxDB for device ID {device_id}: {e}") SCADA_result_dict[device_id] = None - + client.close() - + return SCADA_result_dict + # 2025/03/14 # DingZQ # 返回SCADA数据的原始值,其中可能包含了异常值跟缺失值,我们需要再后续曲线中修复 # 缺失值 # 异常值 -def query_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start_time: str, end_time: str, bucket: str="SCADA_data"): +def query_SCADA_data_by_device_ID_and_timerange( + query_ids_list: List[str], + start_time: str, + end_time: str, + bucket: str = "SCADA_data", +): """ 查询指定时间范围内,多个SCADA设备的数据,用于漏损定位 :param query_ids_list: SCADA设备ID的列表 @@ -1629,11 +1847,15 @@ def query_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - print('start_time', start_time) - print('end_time', end_time) + print("start_time", start_time) + print("end_time", end_time) # 将北京时间转换为 UTC 时间 # beijing_start_time = datetime.fromisoformat(start_time) # utc_start_time = beijing_start_time.astimezone(timezone.utc) - timedelta(seconds=1) @@ -1642,13 +1864,13 @@ def query_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start # utc_end_time = beijing_end_time.astimezone(timezone.utc) + timedelta(seconds=1) # print(utc_end_time) beijing_start_time = datetime.fromisoformat(start_time) - print('beijing_start_time', beijing_start_time) + print("beijing_start_time", beijing_start_time) utc_start_time = time_api.to_utc_time(beijing_start_time) - print('utc_start_time', utc_start_time) + print("utc_start_time", utc_start_time) beijing_end_time = datetime.fromisoformat(end_time) - print('beijing_end_time', beijing_end_time) + print("beijing_end_time", beijing_end_time) utc_stop_time = time_api.to_utc_time(beijing_end_time) - print('utc_stop_time', utc_stop_time) + print("utc_stop_time", utc_stop_time) SCADA_dict = {} for device_id in query_ids_list: # flux_query = f''' @@ -1658,31 +1880,36 @@ def query_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start # |> pivot(rowKey: ["_time"], columnKey: ["device_ID"], valueColumn: "_value") # |> sort(columns: ["_time"]) # ''' - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "monitored_value") |> sort(columns: ["_time"]) - ''' + """ # 执行查询,返回一个 FluxTable 列表 tables = query_api.query(flux_query) records_list = [] for table in tables: for record in table.records: # 获取记录的时间和监测值 
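+                # FluxRecord supports mapping-style access: "_time" is the
+                # point timestamp (UTC) and "_value" the "monitored_value"
+                # field selected by the Flux filter above.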
- records_list.append({ - "time": record["_time"], - "value": record["_value"] - }) + records_list.append( + {"time": record["_time"], "value": record["_value"]} + ) SCADA_dict[device_id] = records_list client.close() return SCADA_dict + # 2025/05/04 DingZQ # SCADA 原始数据有异常偏离,返回的是一个list,list的内容是清洗后的正常值,表示为 time + value -def query_cleaning_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start_time: str, end_time: str, bucket: str="SCADA_data"): +def query_cleaning_SCADA_data_by_device_ID_and_timerange( + query_ids_list: List[str], + start_time: str, + end_time: str, + bucket: str = "SCADA_data", +): """ 查询指定时间范围内,多个SCADA设备的修复的单个的数据 :param query_ids_list: SCADA设备ID的列表 @@ -1693,29 +1920,33 @@ def query_cleaning_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[st """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - print('start_time', start_time) - print('end_time', end_time) + print("start_time", start_time) + print("end_time", end_time) # 将北京时间转换为 UTC 时间 beijing_start_time = datetime.fromisoformat(start_time) - print('beijing_start_time', beijing_start_time) + print("beijing_start_time", beijing_start_time) utc_start_time = time_api.to_utc_time(beijing_start_time) - print('utc_start_time', utc_start_time) + print("utc_start_time", utc_start_time) beijing_end_time = datetime.fromisoformat(end_time) - print('beijing_end_time', beijing_end_time) + print("beijing_end_time", beijing_end_time) utc_stop_time = time_api.to_utc_time(beijing_end_time) - print('utc_stop_time', utc_stop_time) + print("utc_stop_time", utc_stop_time) SCADA_dict = {} for device_id in query_ids_list: - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "datacleaning_value") |> sort(columns: ["_time"]) - ''' + """ # 执行查询,返回一个 FluxTable 列表 tables = query_api.query(flux_query) print(tables) @@ -1723,19 +1954,24 @@ def query_cleaning_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[st for table in tables: for record in table.records: # 获取记录的时间和监测值 - records_list.append({ - "time": record["_time"], - "value": record["_value"] - }) + records_list.append( + {"time": record["_time"], "value": record["_value"]} + ) SCADA_dict[device_id] = records_list client.close() return SCADA_dict + # 2025/05/04 DingZQ # SCADA 数据原版缺失,根据历史数据的平均值补上缺失的部分 -def query_filling_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start_time: str, end_time: str, bucket: str="SCADA_data"): +def query_filling_SCADA_data_by_device_ID_and_timerange( + query_ids_list: List[str], + start_time: str, + end_time: str, + bucket: str = "SCADA_data", +): """ 查询指定时间范围内,多个SCADA设备的填补的单个的数据 :param query_ids_list: SCADA设备ID的列表 @@ -1746,30 +1982,34 @@ def query_filling_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - print('start_time', start_time) - print('end_time', end_time) + print("start_time", start_time) + 
print("end_time", end_time) # 将北京时间转换为 UTC 时间 beijing_start_time = datetime.fromisoformat(start_time) - print('beijing_start_time', beijing_start_time) + print("beijing_start_time", beijing_start_time) utc_start_time = time_api.to_utc_time(beijing_start_time) - print('utc_start_time', utc_start_time) + print("utc_start_time", utc_start_time) beijing_end_time = datetime.fromisoformat(end_time) - print('beijing_end_time', beijing_end_time) + print("beijing_end_time", beijing_end_time) utc_stop_time = time_api.to_utc_time(beijing_end_time) - print('utc_stop_time', utc_stop_time) + print("utc_stop_time", utc_stop_time) SCADA_dict = {} for device_id in query_ids_list: - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "datafilling_value") |> sort(columns: ["_time"]) - ''' + """ # 执行查询,返回一个 FluxTable 列表 tables = query_api.query(flux_query) print(tables) @@ -1777,19 +2017,24 @@ def query_filling_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str for table in tables: for record in table.records: # 获取记录的时间和监测值 - records_list.append({ - "time": record["_time"], - "value": record["_value"] - }) + records_list.append( + {"time": record["_time"], "value": record["_value"]} + ) SCADA_dict[device_id] = records_list client.close() return SCADA_dict + # 2025/05/04 DingZQ # 是把原始数据跟清洗后的数据合并到一起,暂时不需要用这个API -def query_cleaned_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str], start_time: str, end_time: str, bucket: str="SCADA_data"): +def query_cleaned_SCADA_data_by_device_ID_and_timerange( + query_ids_list: List[str], + start_time: str, + end_time: str, + bucket: str = "SCADA_data", +): """ 查询指定时间范围内,多个SCADA设备的清洗完毕后的完整数据 :param query_ids_list: SCADA设备ID的列表 @@ -1800,28 +2045,32 @@ def query_cleaned_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - print('start_time', start_time) - print('end_time', end_time) + print("start_time", start_time) + print("end_time", end_time) # 将北京时间转换为 UTC 时间 beijing_start_time = datetime.fromisoformat(start_time) - print('beijing_start_time', beijing_start_time) + print("beijing_start_time", beijing_start_time) utc_start_time = time_api.to_utc_time(beijing_start_time) - print('utc_start_time', utc_start_time) + print("utc_start_time", utc_start_time) beijing_end_time = datetime.fromisoformat(end_time) - print('beijing_end_time', beijing_end_time) + print("beijing_end_time", beijing_end_time) utc_stop_time = time_api.to_utc_time(beijing_end_time) - print('utc_stop_time', utc_stop_time) + print("utc_stop_time", utc_stop_time) SCADA_dict = {} for device_id in query_ids_list: - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "cleaned_value") |> sort(columns: ["_time"]) - ''' + """ # 执行查询,返回一个 FluxTable 列表 tables = query_api.query(flux_query) print(tables) @@ -1829,10 +2078,9 @@ def query_cleaned_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str for table in tables: for record in table.records: # 获取记录的时间和监测值 - 
records_list.append({ - "time": record["_time"], - "value": record["_value"] - }) + records_list.append( + {"time": record["_time"], "value": record["_value"]} + ) SCADA_dict[device_id] = records_list client.close() @@ -1841,7 +2089,9 @@ def query_cleaned_SCADA_data_by_device_ID_and_timerange(query_ids_list: List[str # DingZQ, 2025-02-15 -def query_SCADA_data_by_device_ID_and_date(query_ids_list: List[str], query_date: str, bucket: str="SCADA_data") -> list[dict[str, float]]: +def query_SCADA_data_by_device_ID_and_date( + query_ids_list: List[str], query_date: str, bucket: str = "SCADA_data" +) -> list[dict[str, float]]: """ 根据SCADA设备的ID和日期查询值 :param query_ids_list: SCADA设备ID的列表, 是api_query 而不是 普通的Id @@ -1852,14 +2102,19 @@ def query_SCADA_data_by_device_ID_and_date(query_ids_list: List[str], query_date """ start_time, end_time = time_api.parse_beijing_date_range(query_date) - - return query_SCADA_data_by_device_ID_and_timerange(query_ids_list, str(start_time), str(end_time), bucket) + return query_SCADA_data_by_device_ID_and_timerange( + query_ids_list, str(start_time), str(end_time), bucket + ) # 2025/02/01 -def store_realtime_simulation_result_to_influxdb(node_result_list: List[Dict[str, any]], link_result_list: List[Dict[str, any]], - result_start_time: str, bucket: str = "realtime_simulation_result"): +def store_realtime_simulation_result_to_influxdb( + node_result_list: List[Dict[str, any]], + link_result_list: List[Dict[str, any]], + result_start_time: str, + bucket: str = "realtime_simulation_result", +): """ 将实时模拟计算结果数据存储到 InfluxDB 的realtime_simulation_result这个bucket中。 :param node_result_list: (List[Dict[str, any]]): 包含节点和结果数据的字典列表。 @@ -1870,9 +2125,16 @@ def store_realtime_simulation_result_to_influxdb(node_result_list: List[Dict[str """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) - print("store_realtime_simulation_result_to_influxdb : result_start_time ", result_start_time) + print( + "store_realtime_simulation_result_to_influxdb : result_start_time ", + result_start_time, + ) # 本地变量,用于记录成功写入的数据点数量 points_written = 0 @@ -1894,51 +2156,57 @@ def store_realtime_simulation_result_to_influxdb(node_result_list: List[Dict[str write_api = client.write_api( write_options=create_write_options(), success_callback=success_callback, - error_callback=error_callback + error_callback=error_callback, ) # 创建一个临时存储点数据的列表 points_to_write = [] - date_str = result_start_time.split('T')[0] + date_str = result_start_time.split("T")[0] print("store_realtime_simulation_result_to_influxdb : date_str ", date_str) - time_beijing = datetime.strptime(result_start_time, '%Y-%m-%dT%H:%M:%S%z').isoformat() + time_beijing = datetime.strptime( + result_start_time, "%Y-%m-%dT%H:%M:%S%z" + ).isoformat() for result in node_result_list: # 提取节点信息和结果数据 - node_id = result.get('node') - data_list = result.get('result', []) + node_id = result.get("node") + data_list = result.get("result", []) for data in data_list: # 构建 Point 数据,多个 field 存在于一个数据点中 - node_point = Point("node") \ - .tag("date", date_str) \ - .tag("ID", node_id) \ - .field("head", data.get('head', 0.0)) \ - .field("pressure", data.get('pressure', 0.0)) \ - .field("actualdemand", data.get('demand', 0.0)) \ - .field("demanddeficit", None) \ - .field("totalExternalOutflow", None) \ - .field("quality", data.get('quality', 0.0)) \ - 
.time(time_beijing, write_precision='s') + node_point = ( + Point("node") + .tag("date", date_str) + .tag("ID", node_id) + .field("head", data.get("head", 0.0)) + .field("pressure", data.get("pressure", 0.0)) + .field("actualdemand", data.get("demand", 0.0)) + .field("demanddeficit", None) + .field("totalExternalOutflow", None) + .field("quality", data.get("quality", 0.0)) + .time(time_beijing, write_precision="s") + ) points_to_write.append(node_point) # 写入数据到 InfluxDB,多个 field 在同一个 point 中 # write_api.write(bucket=bucket, org=org_name, record=node_point) # write_api.flush() # print(f"成功将 {len(node_result_list)} 条node数据写入 InfluxDB。") for result in link_result_list: - link_id = result.get('link') - data_list = result.get('result', []) + link_id = result.get("link") + data_list = result.get("result", []) for data in data_list: - link_point = Point("link") \ - .tag("date", date_str) \ - .tag("ID", link_id) \ - .field("flow", data.get('flow', 0.0)) \ - .field("velocity", data.get('velocity', 0.0)) \ - .field("headloss", data.get('headloss', 0.0)) \ - .field("quality", data.get('quality', 0.0)) \ - .field("status", data.get('status', "UNKNOWN")) \ - .field("setting", data.get('setting', 0.0)) \ - .field("reaction", data.get('reaction', 0.0)) \ - .field("friction", data.get('friction', 0.0)) \ - .time(time_beijing, write_precision='s') + link_point = ( + Point("link") + .tag("date", date_str) + .tag("ID", link_id) + .field("flow", data.get("flow", 0.0)) + .field("velocity", data.get("velocity", 0.0)) + .field("headloss", data.get("headloss", 0.0)) + .field("quality", data.get("quality", 0.0)) + .field("status", data.get("status", "UNKNOWN")) + .field("setting", data.get("setting", 0.0)) + .field("reaction", data.get("reaction", 0.0)) + .field("friction", data.get("friction", 0.0)) + .time(time_beijing, write_precision="s") + ) points_to_write.append(link_point) # write_api.write(bucket=bucket, org=org_name, record=link_point) # write_api.flush() @@ -1951,7 +2219,7 @@ def store_realtime_simulation_result_to_influxdb(node_result_list: List[Dict[str except Exception as e: client.close() raise RuntimeError(f"数据写入 InfluxDB 时发生错误: {e}") - + time.sleep(10) print("Total points written:", points_written) @@ -1960,7 +2228,9 @@ def store_realtime_simulation_result_to_influxdb(node_result_list: List[Dict[str # 2025/02/01 -def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_simulation_result") -> dict: +def query_latest_record_by_ID( + ID: str, type: str, bucket: str = "realtime_simulation_result" +) -> dict: """ 查询指定ID的最新的一条记录 :param ID: (str): 要查询的 ID。 @@ -1970,11 +2240,15 @@ def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_simulati """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() if type == "node": - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: -1d, stop: now()) // 查找最近七天的记录 |> filter(fn: (r) => r["_measurement"] == "node" and r["ID"] == "{ID}") @@ -1986,7 +2260,7 @@ def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_simulati |> group() // 将所有数据聚合到同一个 group |> sort(columns: ["_time"], desc: true) |> limit(n: 1) - ''' + """ tables = query_api.query(flux_query) # 解析查询结果 for table in tables: @@ -1999,10 +2273,10 @@ def query_latest_record_by_ID(ID: str, type: str, 
bucket: str="realtime_simulati "actualdemand": record["actualdemand"], # "demanddeficit": record["demanddeficit"], # "totalExternalOutflow": record["totalExternalOutflow"], - "quality": record["quality"] + "quality": record["quality"], } elif type == "link": - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: -1d, stop: now()) // 查找最近七天的记录 |> filter(fn: (r) => r["_measurement"] == "link" and r["ID"] == "{ID}") @@ -2014,7 +2288,7 @@ def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_simulati |> group() // 将所有数据聚合到同一个 group |> sort(columns: ["_time"], desc: true) |> limit(n: 1) - ''' + """ tables = query_api.query(flux_query) # 解析查询结果 for table in tables: @@ -2029,14 +2303,16 @@ def query_latest_record_by_ID(ID: str, type: str, bucket: str="realtime_simulati "status": record["status"], "setting": record["setting"], "reaction": record["reaction"], - "friction": record["friction"] + "friction": record["friction"], } client.close() return None # 如果没有找到记录 # 2025/02/01 -def query_all_records_by_time(query_time: str, bucket: str="realtime_simulation_result") -> tuple: +def query_all_records_by_time( + query_time: str, bucket: str = "realtime_simulation_result" +) -> tuple: """ 查询指定北京时间的所有记录,包括 'node' 和 'link' measurement,分别以指定格式返回。 :param query_time: (str): 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'。 @@ -2045,7 +2321,11 @@ def query_all_records_by_time(query_time: str, bucket: str="realtime_simulation_ """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将北京时间转换为 UTC 时间 @@ -2054,7 +2334,7 @@ def query_all_records_by_time(query_time: str, bucket: str="realtime_simulation_ utc_start_time = utc_time - timedelta(seconds=1) utc_stop_time = utc_time + timedelta(seconds=1) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["_measurement"] == "node" or r["_measurement"] == "link") @@ -2063,7 +2343,7 @@ def query_all_records_by_time(query_time: str, bucket: str="realtime_simulation_ columnKey:["_field"], valueColumn:"_value" ) - ''' + """ # 执行查询 tables = query_api.query(flux_query) node_records = [] @@ -2075,34 +2355,43 @@ def query_all_records_by_time(query_time: str, bucket: str="realtime_simulation_ measurement = record["_measurement"] # 处理 node 数据 if measurement == "node": - node_records.append({ - "time": record["_time"], - "ID": record["ID"], - "head": record["head"], - "pressure": record["pressure"], - "actualdemand": record["actualdemand"], - "quality": record["quality"] - }) + node_records.append( + { + "time": record["_time"], + "ID": record["ID"], + "head": record["head"], + "pressure": record["pressure"], + "actualdemand": record["actualdemand"], + "quality": record["quality"], + } + ) # 处理 link 数据 elif measurement == "link": - link_records.append({ - "time": record["_time"], - "ID": record["ID"], - "flow": record["flow"], - "velocity": record["velocity"], - "headloss": record["headloss"], - "quality": record["quality"], - "status": record["status"], - "setting": record["setting"], - "reaction": record["reaction"], - "friction": record["friction"] - }) + link_records.append( + { + "time": record["_time"], + "ID": record["ID"], + "flow": record["flow"], + "velocity": 
record["velocity"], + "headloss": record["headloss"], + "quality": record["quality"], + "status": record["status"], + "setting": record["setting"], + "reaction": record["reaction"], + "friction": record["friction"], + } + ) client.close() return node_records, link_records # 2025/03/03 -def query_all_record_by_time_property(query_time: str, type: str, property: str, bucket: str="realtime_simulation_result") -> list: +def query_all_record_by_time_property( + query_time: str, + type: str, + property: str, + bucket: str = "realtime_simulation_result", +) -> list: """ 查询指定北京时间的所有记录,查询 'node' 或 'link' 的某一属性值,以指定格式返回。 :param query_time: (str): 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'。 @@ -2113,7 +2402,11 @@ def query_all_record_by_time_property(query_time: str, type: str, property: str, """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 确定 measurement @@ -2129,11 +2422,11 @@ def query_all_record_by_time_property(query_time: str, type: str, property: str, utc_start_time = utc_time - timedelta(seconds=1) utc_stop_time = utc_time + timedelta(seconds=1) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["_measurement"] == "{measurement}" and r["_field"] == "{property}") - ''' + """ # 执行查询 tables = query_api.query(flux_query) result_records = [] @@ -2141,16 +2434,247 @@ def query_all_record_by_time_property(query_time: str, type: str, property: str, for table in tables: for record in table.records: # print(record.values) # 打印完整记录内容 - result_records.append({ - "ID": record["ID"], - "value": record["_value"] - }) + result_records.append({"ID": record["ID"], "value": record["_value"]}) client.close() return result_records +def query_all_scheme_record_by_time_property( + query_time: str, + type: str, + property: str, + scheme_name: str, + bucket: str = "scheme_simulation_result", +) -> list: + """ + 查询指定北京时间的所有记录,查询 'node' 或 'link' 的某一属性值,以指定格式返回(新版本)。 + + :param query_time: (str): 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'。 + :param type: (str): 查询的类型(决定 measurement),'node' 或 'link' + :param property: (str): 查询的字段名称(field) + :param scheme_name: (str): 方案名称(如 "FANGAN1761124840355") + :param bucket: (str): 数据存储的 bucket 名称。 + :return: list(dict): result_records + """ + client = get_new_client() + if not client.ping(): + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) + + query_api = client.query_api() + + # 确定 measurement + if type == "node": + measurement = "node" + elif type == "link": + measurement = "link" + else: + raise ValueError(f"不支持的类型: {type}") + + # 将北京时间转换为 UTC 时间 + beijing_time = datetime.fromisoformat(query_time) + utc_time = beijing_time.astimezone(timezone.utc) + utc_start_time = utc_time - timedelta(seconds=1) + utc_stop_time = utc_time + timedelta(seconds=1) + # 构建 Flux 查询语句 + flux_query = f""" + from(bucket: "{bucket}") + |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) + |> filter(fn: (r) => r["scheme_Name"] == "{scheme_name}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}") + """ + # 执行查询 + tables = query_api.query(flux_query) + + result_records = [] + + # 解析查询结果 + for table in tables: + for record 
in table.records: + result_records.append({"ID": record["ID"], "value": record["_value"]}) + + client.close() + return result_records + + +def query_scheme_simulation_result_by_ID_time( + scheme_name: str, + ID: str, + type: str, + query_time: str, + bucket: str = "scheme_simulation_result", +) -> list[dict]: + """ + 查询指定ID在指定时间的记录 + :param ID: (str): 要查询的 ID。 + :param type: (str): "node"或“link” + :param query_time: (str): 查询的时间,格式为 '2024-11-24T17:30:00+08:00'。 + :param bucket: (str): 数据存储的 bucket 名称。 + :return: list[dict]: 指定时间的记录数据列表,如果没有找到则返回空列表。 + """ + client = get_new_client() + if not client.ping(): + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) + + query_api = client.query_api() + # 将北京时间转换为 UTC 时间 + beijing_time = datetime.fromisoformat(query_time) + utc_time = beijing_time.astimezone(timezone.utc) + utc_start_time = utc_time - timedelta(seconds=1) + utc_stop_time = utc_time + timedelta(seconds=1) + results = [] + if type == "node": + flux_query = f""" + from(bucket: "{bucket}") + |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) + |> filter(fn: (r) => r["scheme_Name"] == "{scheme_name}" and r["_measurement"] == "node" and r["ID"] == "{ID}") + |> pivot( + rowKey:["_time"], + columnKey:["_field"], + valueColumn:"_value" + ) + """ + tables = query_api.query(flux_query) + # 解析查询结果 + for table in tables: + for record in table.records: + results.append( + { + "time": record["_time"], + "ID": ID, + "head": record["head"], + "pressure": record["pressure"], + "actualdemand": record["actualdemand"], + "quality": record["quality"], + } + ) + elif type == "link": + flux_query = f""" + from(bucket: "{bucket}") + |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) + |> filter(fn: (r) => r["scheme_Name"] == "{scheme_name}" and r["_measurement"] == "link" and r["ID"] == "{ID}") + |> pivot( + rowKey:["_time"], + columnKey:["_field"], + valueColumn:"_value" + ) + """ + tables = query_api.query(flux_query) + # 解析查询结果 + for table in tables: + for record in table.records: + results.append( + { + "time": record["_time"], + "ID": ID, + "flow": record["flow"], + "velocity": record["velocity"], + "headloss": record["headloss"], + "quality": record["quality"], + "status": record["status"], + "setting": record["setting"], + "reaction": record["reaction"], + "friction": record["friction"], + } + ) + client.close() + return results # 返回列表,如果没有记录则为空列表 + + +def query_simulation_result_by_ID_time( + ID: str, type: str, query_time: str, bucket: str = "realtime_simulation_result" +) -> list[dict]: + """ + 查询指定ID在指定时间的记录 + :param ID: (str): 要查询的 ID。 + :param type: (str): "node"或“link” + :param query_time: (str): 查询的时间,格式为 '2024-11-24T17:30:00+08:00'。 + :param bucket: (str): 数据存储的 bucket 名称。 + :return: list[dict]: 指定时间的记录数据列表,如果没有找到则返回空列表。 + """ + client = get_new_client() + if not client.ping(): + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) + + query_api = client.query_api() + # 将北京时间转换为 UTC 时间 + beijing_time = datetime.fromisoformat(query_time) + utc_time = beijing_time.astimezone(timezone.utc) + utc_start_time = utc_time - timedelta(seconds=1) + utc_stop_time = utc_time + timedelta(seconds=1) + results = [] + if type == "node": + flux_query = f""" + from(bucket: "{bucket}") + |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) + |> filter(fn: (r) => r["_measurement"] == "node" and r["ID"] == 
"{ID}") + |> pivot( + rowKey:["_time"], + columnKey:["_field"], + valueColumn:"_value" + ) + """ + tables = query_api.query(flux_query) + # 解析查询结果 + for table in tables: + for record in table.records: + results.append( + { + "time": record["_time"], + "ID": ID, + "head": record["head"], + "pressure": record["pressure"], + "actualdemand": record["actualdemand"], + "quality": record["quality"], + } + ) + elif type == "link": + flux_query = f""" + from(bucket: "{bucket}") + |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) + |> filter(fn: (r) => r["_measurement"] == "link" and r["ID"] == "{ID}") + |> pivot( + rowKey:["_time"], + columnKey:["_field"], + valueColumn:"_value" + ) + """ + tables = query_api.query(flux_query) + # 解析查询结果 + for table in tables: + for record in table.records: + results.append( + { + "time": record["_time"], + "ID": ID, + "flow": record["flow"], + "velocity": record["velocity"], + "headloss": record["headloss"], + "quality": record["quality"], + "status": record["status"], + "setting": record["setting"], + "reaction": record["reaction"], + "friction": record["friction"], + } + ) + client.close() + return results # 返回列表,如果没有记录则为空列表 + + # 2025/02/21 -def query_all_records_by_date(query_date: str, bucket: str="realtime_simulation_result") -> tuple: +def query_all_records_by_date( + query_date: str, bucket: str = "realtime_simulation_result" +) -> tuple: """ 查询指定日期的所有记录,包括‘node’和‘link’,分别以指定的格式返回 :param query_date: 输入的日期,格式为‘2025-02-14’ @@ -2160,27 +2684,37 @@ def query_all_records_by_date(query_date: str, bucket: str="realtime_simulation_ client = get_new_client() # 记录开始时间 time_cost_start = time.perf_counter() - print('{} -- query_all_records_by_date started.'.format(datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- query_all_records_by_date started.".format( + datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S") + ) + ) if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - bg_start_time, bg_end_time = time_api.parse_beijing_date_range(query_date=query_date) + bg_start_time, bg_end_time = time_api.parse_beijing_date_range( + query_date=query_date + ) utc_start_time = time_api.to_utc_time(bg_start_time) utc_stop_time = time_api.to_utc_time(bg_end_time) print("bg_start_time", bg_start_time) print("bg_end_time", bg_end_time) - print('utc_start_time', utc_start_time) - print('utc_stop_time', utc_stop_time) + print("utc_start_time", utc_start_time) + print("utc_stop_time", utc_stop_time) - print('utc_start_time.isoformat', utc_start_time.isoformat()) - print('utc_stop_time.isoformat', utc_stop_time.isoformat()) + print("utc_start_time.isoformat", utc_start_time.isoformat()) + print("utc_stop_time.isoformat", utc_stop_time.isoformat()) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["_measurement"] == "node" or r["_measurement"] == "link" and r["date"] == "{query_date}") @@ -2189,7 +2723,7 @@ def query_all_records_by_date(query_date: str, bucket: str="realtime_simulation_ columnKey:["_field"], valueColumn:"_value" ) - ''' + """ # 执行查询 tables = query_api.query(flux_query) node_records = [] @@ -2201,37 +2735,49 @@ def 
query_all_records_by_date(query_date: str, bucket: str="realtime_simulation_
             measurement = record["_measurement"]
             # 处理 node 数据
             if measurement == "node":
-                node_records.append({
-                    "time": record["_time"],
-                    "ID": record["ID"],
-                    "head": record["head"],
-                    "pressure": record["pressure"],
-                    "actualdemand": record["actualdemand"],
-                    "quality": record["quality"]
-                })
+                node_records.append(
+                    {
+                        "time": record["_time"],
+                        "ID": record["ID"],
+                        "head": record["head"],
+                        "pressure": record["pressure"],
+                        "actualdemand": record["actualdemand"],
+                        "quality": record["quality"],
+                    }
+                )
             # 处理 link 数据
             elif measurement == "link":
-                link_records.append({
-                    "time": record["_time"],
-                    "ID": record["ID"],
-                    "flow": record["flow"],
-                    "velocity": record["velocity"],
-                    "headloss": record["headloss"],
-                    "quality": record["quality"],
-                    "status": record["status"],
-                    "setting": record["setting"],
-                    "reaction": record["reaction"],
-                    "friction": record["friction"]
-                })
+                link_records.append(
+                    {
+                        "time": record["_time"],
+                        "ID": record["ID"],
+                        "flow": record["flow"],
+                        "velocity": record["velocity"],
+                        "headloss": record["headloss"],
+                        "quality": record["quality"],
+                        "status": record["status"],
+                        "setting": record["setting"],
+                        "reaction": record["reaction"],
+                        "friction": record["friction"],
+                    }
+                )
 
     time_cost_end = time.perf_counter()
-    print('{} -- query_all_records_by_date finished, cost time: {:.2f} s.'.format( datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'), time_cost_end - time_cost_start))
+    print(
+        "{} -- query_all_records_by_date finished, cost time: {:.2f} s.".format(
+            datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S"),
+            time_cost_end - time_cost_start,
+        )
+    )
 
     client.close()
     return node_records, link_records
 
+
 # 2025/04/12 DingZQ
-def query_all_records_by_time_range(starttime: str, endtime: str, bucket: str="realtime_simulation_result") -> tuple:
+def query_all_records_by_time_range(
+    starttime: str, endtime: str, bucket: str = "realtime_simulation_result"
+) -> tuple:
     """
     查询指定时间范围内的所有记录,包括‘node’和‘link’,分别以指定的格式返回
     :param starttime: 输入的开始时间,格式为‘2025-02-14T16:00:00+08:00’
@@ -2243,10 +2789,18 @@ def query_all_records_by_time_range(starttime: str, endtime: str, bucket: str="r
     client = get_new_client()
 
     # 记录开始时间
     time_cost_start = time.perf_counter()
-    print('{} -- query_all_records_by_date started.'.format(datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')))
+    print(
+        "{} -- query_all_records_by_time_range started.".format(
+            datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
+        )
+    )
 
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
 
     query_api = client.query_api()
@@ -2257,14 +2811,14 @@ def query_all_records_by_time_range(starttime: str, endtime: str, bucket: str="r
     print("bg_start_time", bg_start_time)
     print("bg_end_time", bg_end_time)
-    print('utc_start_time', utc_start_time)
-    print('utc_stop_time', utc_stop_time)
+    print("utc_start_time", utc_start_time)
+    print("utc_stop_time", utc_stop_time)
 
-    print('utc_start_time.isoformat', utc_start_time.isoformat())
-    print('utc_stop_time.isoformat', utc_stop_time.isoformat())
+    print("utc_start_time.isoformat", utc_start_time.isoformat())
+    print("utc_stop_time.isoformat", utc_stop_time.isoformat())
 
     # 构建 Flux 查询语句
-    flux_query = f'''
+    flux_query = f"""
     from(bucket: "{bucket}")
     |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()})
-    |> filter(fn: (r) => r["_measurement"] == "node" or r["_measurement"] == "link" and r["date"] == "{query_date}")
+    |> filter(fn: (r) => r["_measurement"] == "node" or r["_measurement"] == "link")
@@ -2273,7 +2827,7 @@ def query_all_records_by_time_range(starttime: str, endtime: str, bucket: str="r
     |> pivot(
         rowKey:["_time"],
         columnKey:["_field"],
        valueColumn:"_value"
     )
-    '''
+    """
     # 执行查询
     tables = query_api.query(flux_query)
@@ -2287,38 +2841,50 @@ def query_all_records_by_time_range(starttime: str, endtime: str, bucket: str="r
             measurement = record["_measurement"]
             # 处理 node 数据
             if measurement == "node":
-                node_records.append({
-                    "time": record["_time"],
-                    "ID": record["ID"],
-                    "head": record["head"],
-                    "pressure": record["pressure"],
-                    "actualdemand": record["actualdemand"],
-                    "quality": record["quality"]
-                })
+                node_records.append(
+                    {
+                        "time": record["_time"],
+                        "ID": record["ID"],
+                        "head": record["head"],
+                        "pressure": record["pressure"],
+                        "actualdemand": record["actualdemand"],
+                        "quality": record["quality"],
+                    }
+                )
             # 处理 link 数据
             elif measurement == "link":
-                link_records.append({
-                    "time": record["_time"],
-                    "ID": record["ID"],
-                    "flow": record["flow"],
-                    "velocity": record["velocity"],
-                    "headloss": record["headloss"],
-                    "quality": record["quality"],
-                    "status": record["status"],
-                    "setting": record["setting"],
-                    "reaction": record["reaction"],
-                    "friction": record["friction"]
-                })
+                link_records.append(
+                    {
+                        "time": record["_time"],
+                        "ID": record["ID"],
+                        "flow": record["flow"],
+                        "velocity": record["velocity"],
+                        "headloss": record["headloss"],
+                        "quality": record["quality"],
+                        "status": record["status"],
+                        "setting": record["setting"],
+                        "reaction": record["reaction"],
+                        "friction": record["friction"],
+                    }
+                )
 
     time_cost_end = time.perf_counter()
-    print('{} -- query_all_records_by_date finished, cost time: {:.2f} s.'.format( datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'), time_cost_end - time_cost_start))
+    print(
+        "{} -- query_all_records_by_time_range finished, cost time: {:.2f} s.".format(
+            datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S"),
+            time_cost_end - time_cost_start,
+        )
+    )
 
     client.close()
     return node_records, link_records
 
+
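+# Usage sketch (illustrative only, not taken from the calling code): both ends
+# of the window are Beijing-time ISO strings carrying an explicit +08:00
+# offset, and the bucket defaults to "realtime_simulation_result"; the dates
+# below are made up.
+#
+#     node_records, link_records = query_all_records_by_time_range(
+#         starttime="2025-02-14T16:00:00+08:00",
+#         endtime="2025-02-15T16:00:00+08:00",
+#     )
+#     # node_records -> [{"time": ..., "ID": ..., "head": ..., "pressure": ...,
+#     #                   "actualdemand": ..., "quality": ...}, ...]
+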
 # 2025/03/15 DingZQ
-def query_all_records_by_date_with_type(query_date: str, query_type: str, bucket: str="realtime_simulation_result") -> list:
+def query_all_records_by_date_with_type(
+    query_date: str, query_type: str, bucket: str = "realtime_simulation_result"
+) -> list:
     """
     查询指定日期的所有记录,包括‘node’和‘link’,分别以指定的格式返回
     :param query_date: 输入的日期,格式为‘2025-02-14’
@@ -2332,26 +2898,32 @@ def query_all_records_by_date_with_type(query_date: str, query_type: str, bucket
     time_cost_start = time.perf_counter()
 
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format( datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
 
     query_api = client.query_api()
-    bg_start_time, bg_end_time = time_api.parse_beijing_date_range(query_date=query_date)
+    bg_start_time, bg_end_time = time_api.parse_beijing_date_range(
+        query_date=query_date
+    )
     utc_start_time = time_api.to_utc_time(bg_start_time)
     utc_stop_time = time_api.to_utc_time(bg_end_time)
 
     print("bg_start_time", bg_start_time)
     print("bg_end_time", bg_end_time)
-    print('utc_start_time', utc_start_time)
-    print('utc_stop_time', utc_stop_time)
+    print("utc_start_time", utc_start_time)
+    print("utc_stop_time", utc_stop_time)
 
-    print('utc_start_time.isoformat', utc_start_time.isoformat())
-    print('utc_stop_time.isoformat', utc_stop_time.isoformat())
+    print("utc_start_time.isoformat", utc_start_time.isoformat())
+    print("utc_stop_time.isoformat", utc_stop_time.isoformat())
 
-    print('measurement', query_type)
+    print("measurement", query_type)
 
     # 构建 Flux 查询语句
-    flux_query = f'''
+    flux_query = f"""
     from(bucket: "{bucket}")
     |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()})
    |> filter(fn: (r) => r["_measurement"] == "{query_type}" and r["date"] == "{query_date}")
     |> pivot(
         rowKey:["_time"],
         columnKey:["_field"],
        valueColumn:"_value"
     )
-    '''
+    """
     # 执行查询
     tables = query_api.query(flux_query)
     result_records = []
@@ -2371,36 +2943,46 @@ def query_all_records_by_date_with_type(query_date: str, query_type: str, bucket
             measurement = record["_measurement"]
             # 处理 node 数据
             if measurement == "node":
-                result_records.append({
-                    "time": record["_time"],
-                    "ID": record["ID"],
-                    "head": record["head"],
-                    "pressure": record["pressure"],
-                    "actualdemand": record["actualdemand"],
-                    "quality": record["quality"]
-                })
+                result_records.append(
+                    {
+                        "time": record["_time"],
+                        "ID": record["ID"],
+                        "head": record["head"],
+                        "pressure": record["pressure"],
+                        "actualdemand": record["actualdemand"],
+                        "quality": record["quality"],
+                    }
+                )
             # 处理 link 数据
             elif measurement == "link":
-                result_records.append({
-                    "time": record["_time"],
-                    "ID": record["ID"],
-                    "flow": record["flow"],
-                    "velocity": record["velocity"],
-                    "headloss": record["headloss"],
-                    "quality": record["quality"],
-                    "status": record["status"],
-                    "setting": record["setting"],
-                    "reaction": record["reaction"],
-                    "friction": record["friction"]
-                })
+                result_records.append(
+                    {
+                        "time": record["_time"],
+                        "ID": record["ID"],
+                        "flow": record["flow"],
+                        "velocity": record["velocity"],
+                        "headloss": record["headloss"],
+                        "quality": record["quality"],
+                        "status": record["status"],
+                        "setting": record["setting"],
+                        "reaction": record["reaction"],
+                        "friction": record["friction"],
+                    }
+                )
 
     time_cost_end = time.perf_counter()
 
     client.close()
     return result_records
 
+
 # 2025/02/21
-def query_all_record_by_date_property(query_date: str, type: str, property: str, bucket: str="realtime_simulation_result") -> list:
+def query_all_record_by_date_property(
+    query_date: str,
+    type: str,
+    property: str,
+    bucket: str = "realtime_simulation_result",
+) -> list:
     """
     查询指定日期的‘node’或‘link’的某一属性值的所有记录,以指定的格式返回
     :param query_date: 输入的日期,格式为‘2025-02-14’
@@ -2412,10 +2994,17 @@ def query_all_record_by_date_property(query_date: str, type: str, property: str,
     client = get_new_client()
     # 记录开始时间
     time_cost_start = time.perf_counter()
-    print('{} -- Hydraulic simulation started.'.format(
-        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')))
+    print(
+        "{} -- query_all_record_by_date_property started.".format(
+            datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
+        )
+    )
     if not client.ping():
-        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+        print(
+            "{} -- Failed to connect to InfluxDB.".format(
+                datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            )
+        )
     query_api = client.query_api()
 
     # 确定 measurement
@@ -2426,14 +3015,22 @@ def query_all_record_by_date_property(query_date: str, type: str, property: str,
     else:
         raise ValueError(f"不支持的类型: {type}")
     # 将 start_date 的北京时间转换为 UTC 时间
-    start_time = (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat()
-    stop_time = datetime.strptime(query_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat()
+    start_time = (
+        (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1))
+        .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc)
+        .isoformat()
+    )
+    stop_time = (
+        datetime.strptime(query_date, "%Y-%m-%d")
+        .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc)
+        .isoformat()
+    )
     # 构建 Flux 查询语句
-    flux_query = f'''
+    flux_query = f"""
     from(bucket: "{bucket}")
     |> range(start: {start_time}, stop: {stop_time})
    |> filter(fn: (r) => r["_measurement"] == "{measurement}" and r["date"] == "{query_date}" and r["_field"] == "{property}")
-    '''
+    """
     # 执行查询
     tables = query_api.query(flux_query)
     result_records = []
@@ -2441,21 +3038,29 @@ def query_all_record_by_date_property(query_date: str, type: str, property: str,
     for table in tables:
         for record in table.records:
            # print(record.values)  # 打印完整记录内容
-            result_records.append({
-                "ID": record["ID"],
-                "time": record["_time"],
-                "value": record["_value"]
-            })
+            result_records.append(
+                {"ID": record["ID"], "time": record["_time"], "value": record["_value"]}
+            )
 
     time_cost_end = time.perf_counter()
-    print('{} -- Hydraulic simulation finished, cost time: {:.2f} s.'.format(
-        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'),
-        time_cost_end - time_cost_start))
+    print(
+        "{} -- query_all_record_by_date_property finished, cost time: {:.2f} s.".format(
+            datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S"),
+            time_cost_end - time_cost_start,
+        )
+    )
    client.close()
    return result_records

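+# Usage sketch (illustrative only): pulling one field for every node on a
+# Beijing calendar date; the ID in the result shown below is a made-up example.
+#
+#     records = query_all_record_by_date_property("2025-02-14", "node", "pressure")
+#     # -> [{"ID": "J1", "time": <UTC datetime>, "value": 25.3}, ...]
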
r["_measurement"] == "{measurement}" and r["ID"] == "{ID}" and r["_field"] == "{property}") - ''' + """ # 执行查询 tables = query_api.query(flux_query) # 解析查询结果 results = [] for table in tables: for record in table.records: - results.append({ - "time": record["_time"], - "value": record["_value"] - }) + results.append({"time": record["_time"], "value": record["_value"]}) client.close() return results # 2025/02/13 -def store_scheme_simulation_result_to_influxdb(node_result_list: List[Dict[str, any]], link_result_list: List[Dict[str, any]], - scheme_start_time: str, num_periods: int = 1, scheme_Type: str = None, scheme_Name: str = None, - bucket: str = "scheme_simulation_result"): +def store_scheme_simulation_result_to_influxdb( + node_result_list: List[Dict[str, any]], + link_result_list: List[Dict[str, any]], + scheme_start_time: str, + num_periods: int = 1, + scheme_Type: str = None, + scheme_Name: str = None, + bucket: str = "scheme_simulation_result", +): """ 将方案模拟计算结果存入 InfluxuDb 的scheme_simulation_result这个bucket中。 :param node_result_list: (List[Dict[str, any]]): 包含节点和结果数据的字典列表。 @@ -2522,7 +3142,11 @@ def store_scheme_simulation_result_to_influxdb(node_result_list: List[Dict[str, """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) try: # 本地变量,用于记录成功写入的数据点数量 @@ -2538,6 +3162,7 @@ def store_scheme_simulation_result_to_influxdb(node_result_list: List[Dict[str, def error_callback(exception): print("Error writing batch:", exception) + # write_options = WriteOptions( # jitter_interval=200, # 添加抖动以避免同时写入 # max_retry_delay=30000 # 最大重试延迟(毫秒) @@ -2546,59 +3171,67 @@ def store_scheme_simulation_result_to_influxdb(node_result_list: List[Dict[str, write_api = client.write_api( write_options=create_write_options(), success_callback=success_callback, - error_callback=error_callback + error_callback=error_callback, ) # 创建一个临时存储点数据的列表 points_to_write = [] - date_str = scheme_start_time.split('T')[0] - time_beijing = datetime.strptime(scheme_start_time, '%Y-%m-%dT%H:%M:%S%z') - timestep_parts = globals.hydraulic_timestep.split(':') - timestep = timedelta(hours=int(timestep_parts[0]), minutes=int(timestep_parts[1]), seconds=int(timestep_parts[2])) + date_str = scheme_start_time.split("T")[0] + time_beijing = datetime.strptime(scheme_start_time, "%Y-%m-%dT%H:%M:%S%z") + timestep_parts = globals.hydraulic_timestep.split(":") + timestep = timedelta( + hours=int(timestep_parts[0]), + minutes=int(timestep_parts[1]), + seconds=int(timestep_parts[2]), + ) for node_result in node_result_list: # 提取节点信息和数据结果 - node_id = node_result.get('node') + node_id = node_result.get("node") # 从period 0 到 period num_period - 1 for period_index in range(num_periods): scheme_time = (time_beijing + (timestep * period_index)).isoformat() - data_list = [node_result.get('result', [])[period_index]] + data_list = [node_result.get("result", [])[period_index]] for data in data_list: # 构建 Point 数据,多个 field 存在于一个数据点中 - node_point = Point("node") \ - .tag("date", date_str) \ - .tag("ID", node_id) \ - .tag("scheme_Type", scheme_Type) \ - .tag("scheme_Name", scheme_Name) \ - .field("head", data.get('head', 0.0)) \ - .field("pressure", data.get('pressure', 0.0)) \ - .field("actualdemand", data.get('demand', 0.0)) \ - .field("demanddeficit", None) \ - .field("totalExternalOutflow", None) \ - .field("quality", data.get('quality', 0.0)) 
\ - .time(scheme_time, write_precision='s') + node_point = ( + Point("node") + .tag("date", date_str) + .tag("ID", node_id) + .tag("scheme_Type", scheme_Type) + .tag("scheme_Name", scheme_Name) + .field("head", data.get("head", 0.0)) + .field("pressure", data.get("pressure", 0.0)) + .field("actualdemand", data.get("demand", 0.0)) + .field("demanddeficit", None) + .field("totalExternalOutflow", None) + .field("quality", data.get("quality", 0.0)) + .time(scheme_time, write_precision="s") + ) points_to_write.append(node_point) # 写入数据到 InfluxDB,多个 field 在同一个 point 中 # write_api.write(bucket=bucket, org=org_name, record=node_point) # write_api.flush() for link_result in link_result_list: - link_id = link_result.get('link') + link_id = link_result.get("link") for period_index in range(num_periods): scheme_time = (time_beijing + (timestep * period_index)).isoformat() - data_list = [link_result.get('result', [])[period_index]] + data_list = [link_result.get("result", [])[period_index]] for data in data_list: - link_point = Point("link") \ - .tag("date", date_str) \ - .tag("ID", link_id) \ - .tag("scheme_Type", scheme_Type) \ - .tag("scheme_Name", scheme_Name) \ - .field("flow", data.get('flow', 0.0)) \ - .field("velocity", data.get('velocity', 0.0)) \ - .field("headloss", data.get('headloss', 0.0)) \ - .field("quality", data.get('quality', 0.0)) \ - .field("status", data.get('status', "UNKNOWN")) \ - .field("setting", data.get('setting', 0.0)) \ - .field("reaction", data.get('reaction', 0.0)) \ - .field("friction", data.get('friction', 0.0)) \ - .time(scheme_time, write_precision='s') + link_point = ( + Point("link") + .tag("date", date_str) + .tag("ID", link_id) + .tag("scheme_Type", scheme_Type) + .tag("scheme_Name", scheme_Name) + .field("flow", data.get("flow", 0.0)) + .field("velocity", data.get("velocity", 0.0)) + .field("headloss", data.get("headloss", 0.0)) + .field("quality", data.get("quality", 0.0)) + .field("status", data.get("status", "UNKNOWN")) + .field("setting", data.get("setting", 0.0)) + .field("reaction", data.get("reaction", 0.0)) + .field("friction", data.get("friction", 0.0)) + .time(scheme_time, write_precision="s") + ) points_to_write.append(link_point) # write_api.write(bucket=bucket, org=org_name, record=link_point) # write_api.flush() @@ -2631,21 +3264,29 @@ def query_corresponding_query_id_and_element_id(name: str) -> None: with psycopg.connect(conn_string) as conn: with conn.cursor() as cur: # 查询 transmission_mode 为 'realtime' 的记录 - cur.execute(""" + cur.execute( + """ SELECT type, associated_element_id, api_query_id FROM scada_info WHERE type IN ('source_outflow', 'pipe_flow', 'demand', 'pressure', 'quality'); - """) + """ + ) records = cur.fetchall() # 遍历查询结果,根据 type 分类存入对应的字典 for record in records: record_type, associated_element_id, api_query_id = record if record_type == "source_outflow": - globals.scheme_source_outflow_ids[api_query_id] = associated_element_id + globals.scheme_source_outflow_ids[api_query_id] = ( + associated_element_id + ) elif record_type == "pipe_flow": - globals.scheme_pipe_flow_ids[api_query_id] = associated_element_id + globals.scheme_pipe_flow_ids[api_query_id] = ( + associated_element_id + ) elif record_type == "pressure": - globals.scheme_pressure_ids[api_query_id] = associated_element_id + globals.scheme_pressure_ids[api_query_id] = ( + associated_element_id + ) elif record_type == "demand": globals.scheme_demand_ids[api_query_id] = associated_element_id elif record_type == "quality": @@ -2664,12 +3305,13 @@ def 
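# Each period's timestamp above is scheme start + period_index * hydraulic
# timestep, with the timestep parsed from an 'HH:MM:SS' string. The arithmetic
# in isolation, assuming a one-hour timestep:
from datetime import datetime, timedelta

hydraulic_timestep = "01:00:00"
hours, minutes, seconds = (int(part) for part in hydraulic_timestep.split(":"))
step = timedelta(hours=hours, minutes=minutes, seconds=seconds)
start = datetime.strptime("2025-02-14T08:00:00+08:00", "%Y-%m-%dT%H:%M:%S%z")
for period_index in range(3):
    print((start + step * period_index).isoformat())  # 08:00, 09:00, 10:00 Beijing time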
query_corresponding_query_id_and_element_id(name: str) -> None: # def auto_get_burst_flow(): - - - # 2025/03/11 -def fill_scheme_simulation_result_to_SCADA(scheme_Type: str = None, scheme_Name: str = None, query_date: str = None, - bucket: str = "scheme_simulation_result"): +def fill_scheme_simulation_result_to_SCADA( + scheme_Type: str = None, + scheme_Name: str = None, + query_date: str = None, + bucket: str = "scheme_simulation_result", +): """ :param scheme_Type: 方案类型 :param scheme_Name: 方案名称 @@ -2679,7 +3321,11 @@ def fill_scheme_simulation_result_to_SCADA(scheme_Type: str = None, scheme_Name: """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) # 本地变量,用于记录成功写入的数据点数量 points_written = 0 @@ -2702,88 +3348,118 @@ def fill_scheme_simulation_result_to_SCADA(scheme_Type: str = None, scheme_Name: write_api = client.write_api( write_options=create_write_options(), success_callback=success_callback, - error_callback=error_callback + error_callback=error_callback, ) # 创建一个临时存储点数据的列表 points_to_write = [] # 查找associated_element_id的对应值 for key, value in globals.scheme_source_outflow_ids.items(): - scheme_source_outflow_result = (query_scheme_curve_by_ID_property(scheme_Type=scheme_Type, scheme_Name=scheme_Name, - query_date=query_date, ID=value, type='link', property='flow')) + scheme_source_outflow_result = query_scheme_curve_by_ID_property( + scheme_Type=scheme_Type, + scheme_Name=scheme_Name, + query_date=query_date, + ID=value, + type="link", + property="flow", + ) # print(f"Key: {key}, Query result: {scheme_source_outflow_result}") # 调试输出 for data in scheme_source_outflow_result: point = ( - Point('scheme_source_outflow') + Point("scheme_source_outflow") .tag("date", query_date) .tag("device_ID", key) .tag("scheme_Type", scheme_Type) .tag("scheme_Name", scheme_Name) - .field("monitored_value", data['value']) - .time(data['time'], write_precision='s') + .field("monitored_value", data["value"]) + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) for key, value in globals.scheme_pipe_flow_ids.items(): - scheme_pipe_flow_result = (query_scheme_curve_by_ID_property(scheme_Type=scheme_Type, scheme_Name=scheme_Name, - query_date=query_date, ID=value, type='link', property='flow')) + scheme_pipe_flow_result = query_scheme_curve_by_ID_property( + scheme_Type=scheme_Type, + scheme_Name=scheme_Name, + query_date=query_date, + ID=value, + type="link", + property="flow", + ) for data in scheme_pipe_flow_result: point = ( - Point('scheme_pipe_flow') + Point("scheme_pipe_flow") .tag("date", query_date) .tag("device_ID", key) .tag("scheme_Type", scheme_Type) .tag("scheme_Name", scheme_Name) - .field("monitored_value", data['value']) - .time(data['time'], write_precision='s') + .field("monitored_value", data["value"]) + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) for key, value in globals.scheme_pressure_ids.items(): - scheme_pressure_result = (query_scheme_curve_by_ID_property(scheme_Type=scheme_Type, scheme_Name=scheme_Name, - query_date=query_date, ID=value, type='node', property='pressure')) + scheme_pressure_result = query_scheme_curve_by_ID_property( + scheme_Type=scheme_Type, + scheme_Name=scheme_Name, + 
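# write_api is built here with batching options plus success/error callbacks.
# Note that the client library's batching WriteApi calls the success callback
# with (conf, data) and the error callback with (conf, data, exception), which
# is more arguments than the one-parameter handlers defined in this module
# accept. A sketch with explicit WriteOptions, since create_write_options()
# is defined elsewhere in the module and its settings are not shown here:
from influxdb_client import InfluxDBClient, Point, WriteOptions

def on_success(conf, data):
    print("Batch written:", conf)

def on_error(conf, data, exception):
    print("Error writing batch:", exception)

client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
write_api = client.write_api(
    write_options=WriteOptions(batch_size=5000, flush_interval=10_000,
                               jitter_interval=200, max_retry_delay=30_000),
    success_callback=on_success,
    error_callback=on_error,
)
write_api.write(bucket="example_bucket", record=Point("demo").field("v", 1.0))
write_api.close()  # flushes any pending batch before the client closes
client.close()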
query_date=query_date, + ID=value, + type="node", + property="pressure", + ) for data in scheme_pressure_result: point = ( - Point('scheme_pressure') + Point("scheme_pressure") .tag("date", query_date) .tag("device_ID", key) .tag("scheme_Type", scheme_Type) .tag("scheme_Name", scheme_Name) - .field("monitored_value", data['value']) - .time(data['time'], write_precision='s') + .field("monitored_value", data["value"]) + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) for key, value in globals.scheme_demand_ids.items(): - scheme_demand_result = (query_scheme_curve_by_ID_property(scheme_Type=scheme_Type, scheme_Name=scheme_Name, - query_date=query_date, ID=value, type='node', property='actualdemand')) + scheme_demand_result = query_scheme_curve_by_ID_property( + scheme_Type=scheme_Type, + scheme_Name=scheme_Name, + query_date=query_date, + ID=value, + type="node", + property="actualdemand", + ) for data in scheme_demand_result: point = ( - Point('scheme_demand') + Point("scheme_demand") .tag("date", query_date) .tag("device_ID", key) .tag("scheme_Type", scheme_Type) .tag("scheme_Name", scheme_Name) - .field("monitored_value", data['value']) - .time(data['time'], write_precision='s') + .field("monitored_value", data["value"]) + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) for key, value in globals.scheme_quality_ids.items(): - scheme_quality_result = (query_scheme_curve_by_ID_property(scheme_Type=scheme_Type, scheme_Name=scheme_Name, - query_date=query_date, ID=value, type='node', property='quality')) + scheme_quality_result = query_scheme_curve_by_ID_property( + scheme_Type=scheme_Type, + scheme_Name=scheme_Name, + query_date=query_date, + ID=value, + type="node", + property="quality", + ) for data in scheme_quality_result: point = ( - Point('scheme_quality') + Point("scheme_quality") .tag("date", query_date) .tag("device_ID", key) .tag("scheme_Type", scheme_Type) .tag("scheme_Name", scheme_Name) - .field("monitored_value", data['value']) - .time(data['time'], write_precision='s') + .field("monitored_value", data["value"]) + .time(data["time"], write_precision="s") ) points_to_write.append(point) # write_api.write(bucket=bucket, org=org_name, record=point) @@ -2801,7 +3477,9 @@ def fill_scheme_simulation_result_to_SCADA(scheme_Type: str = None, scheme_Name: # 2025/02/15 -def query_SCADA_data_curve(api_query_id: str, start_date: str, end_date: str, bucket: str="SCADA_data") -> list: +def query_SCADA_data_curve( + api_query_id: str, start_date: str, end_date: str, bucket: str = "SCADA_data" +) -> list: """ 根据SCADA设备的api_query_id和时间范围,查询得到曲线,查到的数据为0时区时间 :param api_query_id: SCADA设备的api_query_id @@ -2812,34 +3490,48 @@ def query_SCADA_data_curve(api_query_id: str, start_date: str, end_date: str, bu """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将 start_date 的北京时间转换为 UTC 时间范围 - start_time = (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + 
(datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(end_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["device_ID"] == "{api_query_id}" and r["_field"] == "monitored_value") - ''' + """ # 执行查询 tables = query_api.query(flux_query) # 解析查询结果 results = [] for table in tables: for record in table.records: - results.append({ - "time": record["_time"], - "value": record["_value"] - }) + results.append({"time": record["_time"], "value": record["_value"]}) client.close() return results # 2025/02/18 -def query_scheme_all_record_by_time(scheme_Type: str, scheme_Name: str, query_time: str, bucket: str="scheme_simulation_result") -> tuple: +def query_scheme_all_record_by_time( + scheme_Type: str, + scheme_Name: str, + query_time: str, + bucket: str = "scheme_simulation_result", +) -> tuple: """ 查询指定方案某一时刻的所有记录,包括‘node'和‘link’,分别以指定格式返回。 :param scheme_Type: 方案类型 @@ -2850,7 +3542,11 @@ def query_scheme_all_record_by_time(scheme_Type: str, scheme_Name: str, query_ti """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将北京时间转换为 UTC 时间 @@ -2859,7 +3555,7 @@ def query_scheme_all_record_by_time(scheme_Type: str, scheme_Name: str, query_ti utc_start_time = utc_time - timedelta(seconds=1) utc_stop_time = utc_time + timedelta(seconds=1) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}" and r["_measurement"] == "node" or r["_measurement"] == "link") @@ -2868,7 +3564,7 @@ def query_scheme_all_record_by_time(scheme_Type: str, scheme_Name: str, query_ti columnKey:["_field"], valueColumn:"_value" ) - ''' + """ # 执行查询 tables = query_api.query(flux_query) node_records = [] @@ -2880,35 +3576,45 @@ def query_scheme_all_record_by_time(scheme_Type: str, scheme_Name: str, query_ti measurement = record["_measurement"] # 处理 node 数据 if measurement == "node": - node_records.append({ - "time": record["_time"], - "ID": record["ID"], - "head": record["head"], - "pressure": record["pressure"], - "actualdemand": record["actualdemand"], - "quality": record["quality"] - }) + node_records.append( + { + "time": record["_time"], + "ID": record["ID"], + "head": record["head"], + "pressure": record["pressure"], + "actualdemand": record["actualdemand"], + "quality": record["quality"], + } + ) # 处理 link 数据 elif measurement == "link": - link_records.append({ - "time": record["_time"], - "ID": record["ID"], - "flow": record["flow"], - "velocity": record["velocity"], - "headloss": record["headloss"], - "quality": record["quality"], - "status": record["status"], - "setting": record["setting"], - "reaction": record["reaction"], - "friction": record["friction"] - }) + link_records.append( + { + "time": record["_time"], + "ID": record["ID"], + "flow": record["flow"], + "velocity": record["velocity"], + "headloss": record["headloss"], + "quality": record["quality"], + "status": 
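# Caution on the filter above: Flux gives `and` higher precedence than `or`,
# so `A and B and _measurement == "node" or _measurement == "link"` parses as
# `(A and B and node) or link` and returns every link record regardless of
# scheme_Type/scheme_Name. The same expression appears again in
# query_scheme_all_record further below. A sketch of the filter with explicit
# parentheses (tag values are placeholders):
scheme_Type, scheme_Name, bucket = "burst_Analysis", "Fangan0817114448", "scheme_simulation_result"
flux_query = f"""
from(bucket: "{bucket}")
  |> range(start: -30d)
  |> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}"
      and (r["_measurement"] == "node" or r["_measurement"] == "link"))
"""
print(flux_query)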
record["status"], + "setting": record["setting"], + "reaction": record["reaction"], + "friction": record["friction"], + } + ) client.close() return node_records, link_records # 2025/03/04 -def query_scheme_all_record_by_time_property(scheme_Type: str, scheme_Name: str, query_time: str, type: str, property: str, - bucket: str="scheme_simulation_result") -> list: +def query_scheme_all_record_by_time_property( + scheme_Type: str, + scheme_Name: str, + query_time: str, + type: str, + property: str, + bucket: str = "scheme_simulation_result", +) -> list: """ 查询指定方案某一时刻‘node'或‘link’某一属性值,以指定格式返回。 :param scheme_Type: 方案类型 @@ -2921,7 +3627,11 @@ def query_scheme_all_record_by_time_property(scheme_Type: str, scheme_Name: str, """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 确定 measurement @@ -2937,28 +3647,32 @@ def query_scheme_all_record_by_time_property(scheme_Type: str, scheme_Name: str, utc_start_time = utc_time - timedelta(seconds=1) utc_stop_time = utc_time + timedelta(seconds=1) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}") - ''' + """ # 执行查询 tables = query_api.query(flux_query) result_records = [] # 解析查询结果 for table in tables: for record in table.records: - result_records.append({ - "ID": record["ID"], - "value": record["_value"] - }) + result_records.append({"ID": record["ID"], "value": record["_value"]}) client.close() return result_records # 2025/02/19 -def query_scheme_curve_by_ID_property(scheme_Type: str, scheme_Name: str, query_date: str, ID: str, type: str, property: str, - bucket: str="scheme_simulation_result") -> list: +def query_scheme_curve_by_ID_property( + scheme_Type: str, + scheme_Name: str, + query_date: str, + ID: str, + type: str, + property: str, + bucket: str = "scheme_simulation_result", +) -> list: """ 根据scheme_Type和scheme_Name,查询该模拟方案中,某一node或link的某一属性值的所有时间的结果 :param scheme_Type: 方案类型 @@ -2972,7 +3686,11 @@ def query_scheme_curve_by_ID_property(scheme_Type: str, scheme_Name: str, query_ """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 确定 measurement @@ -2982,31 +3700,41 @@ def query_scheme_curve_by_ID_property(scheme_Type: str, scheme_Name: str, query_ measurement = "link" else: raise ValueError(f"不支持的类型: {type}") - start_time = (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(query_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(query_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 
Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "{measurement}" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}" and r["ID"] == "{ID}" and r["_field"] == "{property}") - ''' + """ # 执行查询 tables = query_api.query(flux_query) # 解析查询结果 results = [] for table in tables: for record in table.records: - results.append({ - "time": record["_time"], - "value": record["_value"] - }) + results.append({"time": record["_time"], "value": record["_value"]}) client.close() return results # 2025/02/21 -def query_scheme_all_record(scheme_Type: str, scheme_Name: str, query_date: str, bucket: str="scheme_simulation_result") -> tuple: +def query_scheme_all_record( + scheme_Type: str, + scheme_Name: str, + query_date: str, + bucket: str = "scheme_simulation_result", +) -> tuple: """ 查询指定方案的所有记录,包括‘node'和‘link’,分别以指定格式返回。 :param scheme_Type: 方案类型 @@ -3017,18 +3745,24 @@ def query_scheme_all_record(scheme_Type: str, scheme_Name: str, query_date: str, """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - bg_start_time, bg_end_time = time_api.parse_beijing_date_range(query_date=query_date) + bg_start_time, bg_end_time = time_api.parse_beijing_date_range( + query_date=query_date + ) utc_start_time = time_api.to_utc_time(bg_start_time) utc_stop_time = time_api.to_utc_time(bg_end_time) print(utc_start_time, utc_stop_time) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()}) |> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}" and r["_measurement"] == "node" or r["_measurement"] == "link") @@ -3037,7 +3771,7 @@ def query_scheme_all_record(scheme_Type: str, scheme_Name: str, query_date: str, columnKey:["_field"], valueColumn:"_value" ) - ''' + """ # 执行查询 tables = query_api.query(flux_query) node_records = [] @@ -3049,35 +3783,45 @@ def query_scheme_all_record(scheme_Type: str, scheme_Name: str, query_date: str, measurement = record["_measurement"] # 处理 node 数据 if measurement == "node": - node_records.append({ - "time": record["_time"], - "ID": record["ID"], - "head": record["head"], - "pressure": record["pressure"], - "actualdemand": record["actualdemand"], - "quality": record["quality"] - }) + node_records.append( + { + "time": record["_time"], + "ID": record["ID"], + "head": record["head"], + "pressure": record["pressure"], + "actualdemand": record["actualdemand"], + "quality": record["quality"], + } + ) # 处理 link 数据 elif measurement == "link": - link_records.append({ - "time": record["_time"], - "ID": record["ID"], - "flow": record["flow"], - "velocity": record["velocity"], - "headloss": record["headloss"], - "quality": record["quality"], - "status": record["status"], - "setting": record["setting"], - "reaction": record["reaction"], - "friction": record["friction"] - }) + link_records.append( + { + "time": record["_time"], + "ID": record["ID"], + "flow": record["flow"], + "velocity": record["velocity"], + "headloss": record["headloss"], + "quality": record["quality"], + "status": record["status"], + "setting": record["setting"], + "reaction": record["reaction"], + "friction": 
record["friction"], + } + ) client.close() return node_records, link_records # 2025/03/04 -def query_scheme_all_record_property(scheme_Type: str, scheme_Name: str, query_date: str, type: str, property: str, - bucket: str="scheme_simulation_result") -> list: +def query_scheme_all_record_property( + scheme_Type: str, + scheme_Name: str, + query_date: str, + type: str, + property: str, + bucket: str = "scheme_simulation_result", +) -> list: """ 查询指定方案的‘node'或‘link’的某一属性值,以指定格式返回。 :param scheme_Type: 方案类型 @@ -3090,7 +3834,11 @@ def query_scheme_all_record_property(scheme_Type: str, scheme_Name: str, query_d """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 确定 measurement @@ -3100,31 +3848,39 @@ def query_scheme_all_record_property(scheme_Type: str, scheme_Name: str, query_d measurement = "link" else: raise ValueError(f"不支持的类型: {type}") - start_time = (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(query_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(query_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}" and r["date"] == "{query_date}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}") - ''' + """ # 执行查询 tables = query_api.query(flux_query) result_records = [] # 解析查询结果 for table in tables: for record in table.records: - result_records.append({ - "time": record["_time"], - "ID": record["ID"], - "value": record["_value"] - }) + result_records.append( + {"time": record["_time"], "ID": record["ID"], "value": record["_value"]} + ) client.close() return result_records # 2025/02/16 -def export_SCADA_data_to_csv(start_date: str, end_date: str, bucket: str="SCADA_data") -> None: +def export_SCADA_data_to_csv( + start_date: str, end_date: str, bucket: str = "SCADA_data" +) -> None: """ 导出influxdb中SCADA_data这个bucket的数据到csv中 :param start_date: 查询开始的时间,格式为 'YYYY-MM-DD' @@ -3134,17 +3890,29 @@ def export_SCADA_data_to_csv(start_date: str, end_date: str, bucket: str="SCADA_ """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将 start_date 的北京时间转换为 UTC 时间范围 - start_time = (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + 
.isoformat() + ) + stop_time = ( + datetime.strptime(end_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 Flux 查询语句 - flux_query = f''' + flux_query = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) - ''' + """ # 执行查询 tables = query_api.query(flux_query) # 存储查询结果 @@ -3152,21 +3920,45 @@ def export_SCADA_data_to_csv(start_date: str, end_date: str, bucket: str="SCADA_ for table in tables: for record in table.records: row = { - 'time': record.get_time(), - 'measurement': record.get_measurement(), - 'date': record.values.get('date', None), - 'description': record.values.get('description', None), - 'device_ID': record.values.get('device_ID', None), - 'monitored_value': record.get_value() if record.get_field() == 'monitored_value' else None, - 'datacleaning_value': record.get_value() if record.get_field() == 'datacleaning_value' else None, - 'simulation_value': record.get_value() if record.get_field() == 'simulation_value' else None, + "time": record.get_time(), + "measurement": record.get_measurement(), + "date": record.values.get("date", None), + "description": record.values.get("description", None), + "device_ID": record.values.get("device_ID", None), + "monitored_value": ( + record.get_value() + if record.get_field() == "monitored_value" + else None + ), + "datacleaning_value": ( + record.get_value() + if record.get_field() == "datacleaning_value" + else None + ), + "simulation_value": ( + record.get_value() + if record.get_field() == "simulation_value" + else None + ), } rows.append(row) # 动态生成 CSV 文件名 csv_filename = f"SCADA_data_{start_date}至{end_date}.csv" # 写入到 CSV 文件 - with open(csv_filename, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'description', 'device_ID', 'monitored_value', 'datacleaning_value', 'simulation_value']) + with open(csv_filename, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "description", + "device_ID", + "monitored_value", + "datacleaning_value", + "simulation_value", + ], + ) writer.writeheader() writer.writerows(rows) print(f"Data exported to {csv_filename} successfully.") @@ -3174,7 +3966,9 @@ def export_SCADA_data_to_csv(start_date: str, end_date: str, bucket: str="SCADA_ # 2025/02/17 -def export_realtime_simulation_result_to_csv(start_date: str, end_date: str, bucket: str="realtime_simulation_result") -> None: +def export_realtime_simulation_result_to_csv( + start_date: str, end_date: str, bucket: str = "realtime_simulation_result" +) -> None: """ 导出influxdb中realtime_simulation_result这个bucket的数据到csv中 :param start_date: 查询开始的时间,格式为 'YYYY-MM-DD' @@ -3184,18 +3978,30 @@ def export_realtime_simulation_result_to_csv(start_date: str, end_date: str, buc """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将 start_date 的北京时间转换为 UTC 时间范围 - start_time = (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, 
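# The CSV exporters below merge per-field records into one row per (time, ID)
# and then write them with a fixed column list. csv.DictWriter fills columns
# that a row is missing (e.g. 'leakage') with restval, but raises on keys not
# listed in fieldnames unless told otherwise -- a sketch of the two knobs:
import csv

rows = [
    {"time": "2025-02-14T00:00:00Z", "ID": "P-1", "flow": 12.3},                  # no 'velocity'
    {"time": "2025-02-14T00:00:00Z", "ID": "P-2", "flow": 8.7, "velocity": 1.1},
]
with open("example.csv", mode="w", newline="") as f:
    writer = csv.DictWriter(
        f,
        fieldnames=["time", "ID", "flow", "velocity"],
        restval="",             # value written for missing keys
        extrasaction="ignore",  # drop keys not in fieldnames instead of raising
    )
    writer.writeheader()
    writer.writerows(rows)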
minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(end_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 Flux 查询语句,查询指定时间范围内的数据 - flux_query_link = f''' + flux_query_link = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "link") - ''' + """ # 执行查询 link_tables = query_api.query(flux_query_link) # 存储link类的数据 @@ -3203,19 +4009,19 @@ def export_realtime_simulation_result_to_csv(start_date: str, end_date: str, buc link_data = {} for table in link_tables: for record in table.records: - key = (record.get_time(), record.values.get('ID', None)) + key = (record.get_time(), record.values.get("ID", None)) if key not in link_data: link_data[key] = {} field = record.get_field() link_data[key][field] = record.get_value() - link_data[key]['measurement'] = record.get_measurement() - link_data[key]['date'] = record.values.get('date', None) + link_data[key]["measurement"] = record.get_measurement() + link_data[key]["date"] = record.values.get("date", None) # 构建 Flux 查询语句,查询指定时间范围内的数据 - flux_query_node = f''' + flux_query_node = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "node") - ''' + """ # 执行查询 node_tables = query_api.query(flux_query_node) # 存储node类的数据 @@ -3223,33 +4029,63 @@ def export_realtime_simulation_result_to_csv(start_date: str, end_date: str, buc node_data = {} for table in node_tables: for record in table.records: - key = (record.get_time(), record.values.get('ID', None)) + key = (record.get_time(), record.values.get("ID", None)) if key not in node_data: node_data[key] = {} field = record.get_field() node_data[key][field] = record.get_value() - node_data[key]['measurement'] = record.get_measurement() - node_data[key]['date'] = record.values.get('date', None) + node_data[key]["measurement"] = record.get_measurement() + node_data[key]["date"] = record.values.get("date", None) for key in set(link_data.keys()): - row = {'time': key[0], "ID": key[1]} + row = {"time": key[0], "ID": key[1]} row.update(link_data.get(key, {})) link_rows.append(row) for key in set(node_data.keys()): - row = {'time': key[0], "ID": key[1]} + row = {"time": key[0], "ID": key[1]} row.update(node_data.get(key, {})) node_rows.append(row) # 动态生成 CSV 文件名 csv_filename_link = f"realtime_simulation_link_result_{start_date}至{end_date}.csv" csv_filename_node = f"realtime_simulation_node_result_{start_date}至{end_date}.csv" # 写入到 CSV 文件 - with open(csv_filename_link, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'ID', 'flow', 'leakage', 'velocity', 'headloss', 'status', 'setting', 'quality', 'friction', 'reaction']) + with open(csv_filename_link, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "ID", + "flow", + "leakage", + "velocity", + "headloss", + "status", + "setting", + "quality", + "friction", + "reaction", + ], + ) writer.writeheader() writer.writerows(link_rows) - with open(csv_filename_node, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'ID', 'head', 'pressure', 'actualdemand', - 'demanddeficit', 'totalExternalOutflow', 'quality']) + with open(csv_filename_node, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "ID", + 
"head", + "pressure", + "actualdemand", + "demanddeficit", + "totalExternalOutflow", + "quality", + ], + ) writer.writeheader() writer.writerows(node_rows) print(f"Data exported to {csv_filename_link} and {csv_filename_node} successfully.") @@ -3257,7 +4093,9 @@ def export_realtime_simulation_result_to_csv(start_date: str, end_date: str, buc # 2025/02/18 -def export_scheme_simulation_result_to_csv_time(start_date: str, end_date: str, bucket: str="scheme_simulation_result") -> None: +def export_scheme_simulation_result_to_csv_time( + start_date: str, end_date: str, bucket: str = "scheme_simulation_result" +) -> None: """ 导出influxdb中scheme_simulation_result这个bucket的数据到csv中 :param start_date: 查询开始的时间,格式为 'YYYY-MM-DD' @@ -3267,18 +4105,30 @@ def export_scheme_simulation_result_to_csv_time(start_date: str, end_date: str, """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() # 将 start_date 的北京时间转换为 UTC 时间范围 - start_time = (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(start_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(end_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 Flux 查询语句,查询指定时间范围内的数据 - flux_query_link = f''' + flux_query_link = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "link") - ''' + """ # 执行查询 link_tables = query_api.query(flux_query_link) # 存储link类的数据 @@ -3286,21 +4136,21 @@ def export_scheme_simulation_result_to_csv_time(start_date: str, end_date: str, link_data = {} for table in link_tables: for record in table.records: - key = (record.get_time(), record.values.get('ID', None)) + key = (record.get_time(), record.values.get("ID", None)) if key not in link_data: link_data[key] = {} field = record.get_field() link_data[key][field] = record.get_value() - link_data[key]['measurement'] = record.get_measurement() - link_data[key]['date'] = record.values.get('date', None) - link_data[key]['scheme_Type'] = record.values.get('scheme_Type', None) - link_data[key]['scheme_Name'] = record.values.get('scheme_Name', None) + link_data[key]["measurement"] = record.get_measurement() + link_data[key]["date"] = record.values.get("date", None) + link_data[key]["scheme_Type"] = record.values.get("scheme_Type", None) + link_data[key]["scheme_Name"] = record.values.get("scheme_Name", None) # 构建 Flux 查询语句,查询指定时间范围内的数据 - flux_query_node = f''' + flux_query_node = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "node") - ''' + """ # 执行查询 node_tables = query_api.query(flux_query_node) # 存储node类的数据 @@ -3308,34 +4158,68 @@ def export_scheme_simulation_result_to_csv_time(start_date: str, end_date: str, node_data = {} for table in node_tables: for record in table.records: - key = (record.get_time(), record.values.get('ID', None)) + key = (record.get_time(), record.values.get("ID", None)) if key not in node_data: 
node_data[key] = {} field = record.get_field() node_data[key][field] = record.get_value() - node_data[key]['measurement'] = record.get_measurement() - node_data[key]['date'] = record.values.get('date', None) - node_data[key]['scheme_Type'] = record.values.get('scheme_Type', None) - node_data[key]['scheme_Name'] = record.values.get('scheme_Name', None) + node_data[key]["measurement"] = record.get_measurement() + node_data[key]["date"] = record.values.get("date", None) + node_data[key]["scheme_Type"] = record.values.get("scheme_Type", None) + node_data[key]["scheme_Name"] = record.values.get("scheme_Name", None) for key in set(link_data.keys()): - row = {'time': key[0], "ID": key[1]} + row = {"time": key[0], "ID": key[1]} row.update(link_data.get(key, {})) link_rows.append(row) for key in set(node_data.keys()): - row = {'time': key[0], "ID": key[1]} + row = {"time": key[0], "ID": key[1]} row.update(node_data.get(key, {})) node_rows.append(row) # 动态生成 CSV 文件名 csv_filename_link = f"scheme_simulation_link_result_{start_date}至{end_date}.csv" csv_filename_node = f"scheme_simulation_node_result_{start_date}至{end_date}.csv" # 写入到 CSV 文件 - with open(csv_filename_link, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'scheme_Type', 'scheme_Name', 'ID', 'flow', 'leakage', 'velocity', 'headloss', 'status', 'setting', 'quality', 'friction', 'reaction']) + with open(csv_filename_link, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "scheme_Type", + "scheme_Name", + "ID", + "flow", + "leakage", + "velocity", + "headloss", + "status", + "setting", + "quality", + "friction", + "reaction", + ], + ) writer.writeheader() writer.writerows(link_rows) - with open(csv_filename_node, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'scheme_Type', 'scheme_Name', 'ID', 'head', 'pressure', 'actualdemand', - 'demanddeficit', 'totalExternalOutflow', 'quality']) + with open(csv_filename_node, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "scheme_Type", + "scheme_Name", + "ID", + "head", + "pressure", + "actualdemand", + "demanddeficit", + "totalExternalOutflow", + "quality", + ], + ) writer.writeheader() writer.writerows(node_rows) print(f"Data exported to {csv_filename_link} and {csv_filename_node} successfully.") @@ -3343,7 +4227,12 @@ def export_scheme_simulation_result_to_csv_time(start_date: str, end_date: str, # 2025/02/18 -def export_scheme_simulation_result_to_csv_scheme(scheme_Type: str, scheme_Name: str, query_date: str, bucket: str="scheme_simulation_result") -> None: +def export_scheme_simulation_result_to_csv_scheme( + scheme_Type: str, + scheme_Name: str, + query_date: str, + bucket: str = "scheme_simulation_result", +) -> None: """ 导出influxdb中scheme_simulation_result这个bucket的数据到csv中 :param scheme_Type: 查询的方案类型 @@ -3354,17 +4243,29 @@ def export_scheme_simulation_result_to_csv_scheme(scheme_Type: str, scheme_Name: """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) query_api = client.query_api() - start_time = (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() 
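# Exporting a single scheme below requires its scheme_Type/scheme_Name tags
# up front; when they are unknown, the available tag values can be listed
# first with Flux's schema package. A sketch with placeholder credentials:
from influxdb_client import InfluxDBClient

flux = '''
import "influxdata/influxdb/schema"
schema.tagValues(bucket: "scheme_simulation_result", tag: "scheme_Name")
'''
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    for table in client.query_api().query(flux):
        for record in table.records:
            print(record.get_value())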
- stop_time = datetime.strptime(query_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(query_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(query_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构建 Flux 查询语句,查询指定时间范围内的数据 - flux_query_link = f''' + flux_query_link = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "link" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}") - ''' + """ # 执行查询 link_tables = query_api.query(flux_query_link) # 存储link类的数据 @@ -3372,21 +4273,21 @@ def export_scheme_simulation_result_to_csv_scheme(scheme_Type: str, scheme_Name: link_data = {} for table in link_tables: for record in table.records: - key = (record.get_time(), record.values.get('ID', None)) + key = (record.get_time(), record.values.get("ID", None)) if key not in link_data: link_data[key] = {} field = record.get_field() link_data[key][field] = record.get_value() - link_data[key]['measurement'] = record.get_measurement() - link_data[key]['date'] = record.values.get('date', None) - link_data[key]['scheme_Type'] = record.values.get('scheme_Type', None) - link_data[key]['scheme_Name'] = record.values.get('scheme_Name', None) + link_data[key]["measurement"] = record.get_measurement() + link_data[key]["date"] = record.values.get("date", None) + link_data[key]["scheme_Type"] = record.values.get("scheme_Type", None) + link_data[key]["scheme_Name"] = record.values.get("scheme_Name", None) # 构建 Flux 查询语句,查询指定时间范围内的数据 - flux_query_node = f''' + flux_query_node = f""" from(bucket: "{bucket}") |> range(start: {start_time}, stop: {stop_time}) |> filter(fn: (r) => r["_measurement"] == "node" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}") - ''' + """ # 执行查询 node_tables = query_api.query(flux_query_node) # 存储node类的数据 @@ -3394,41 +4295,81 @@ def export_scheme_simulation_result_to_csv_scheme(scheme_Type: str, scheme_Name: node_data = {} for table in node_tables: for record in table.records: - key = (record.get_time(), record.values.get('ID', None)) + key = (record.get_time(), record.values.get("ID", None)) if key not in node_data: node_data[key] = {} field = record.get_field() node_data[key][field] = record.get_value() - node_data[key]['measurement'] = record.get_measurement() - node_data[key]['date'] = record.values.get('date', None) - node_data[key]['scheme_Type'] = record.values.get('scheme_Type', None) - node_data[key]['scheme_Name'] = record.values.get('scheme_Name', None) + node_data[key]["measurement"] = record.get_measurement() + node_data[key]["date"] = record.values.get("date", None) + node_data[key]["scheme_Type"] = record.values.get("scheme_Type", None) + node_data[key]["scheme_Name"] = record.values.get("scheme_Name", None) for key in set(link_data.keys()): - row = {'time': key[0], "ID": key[1]} + row = {"time": key[0], "ID": key[1]} row.update(link_data.get(key, {})) link_rows.append(row) for key in set(node_data.keys()): - row = {'time': key[0], "ID": key[1]} + row = {"time": key[0], "ID": key[1]} row.update(node_data.get(key, {})) node_rows.append(row) # 动态生成 CSV 文件名 - csv_filename_link = f"scheme_simulation_link_result_{scheme_Name}_of_{scheme_Type}.csv" - csv_filename_node = 
f"scheme_simulation_node_result_{scheme_Name}_of_{scheme_Type}.csv" + csv_filename_link = ( + f"scheme_simulation_link_result_{scheme_Name}_of_{scheme_Type}.csv" + ) + csv_filename_node = ( + f"scheme_simulation_node_result_{scheme_Name}_of_{scheme_Type}.csv" + ) # 写入到 CSV 文件 - with open(csv_filename_link, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'scheme_Type', 'scheme_Name', 'ID', 'flow', 'leakage', 'velocity', 'headloss', 'status', 'setting', 'quality', 'friction', 'reaction']) + with open(csv_filename_link, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "scheme_Type", + "scheme_Name", + "ID", + "flow", + "leakage", + "velocity", + "headloss", + "status", + "setting", + "quality", + "friction", + "reaction", + ], + ) writer.writeheader() writer.writerows(link_rows) - with open(csv_filename_node, mode='w', newline='') as file: - writer = csv.DictWriter(file, fieldnames=['time', 'measurement', 'date', 'scheme_Type', 'scheme_Name', 'ID', 'head', 'pressure', 'actualdemand', - 'demanddeficit', 'totalExternalOutflow', 'quality']) + with open(csv_filename_node, mode="w", newline="") as file: + writer = csv.DictWriter( + file, + fieldnames=[ + "time", + "measurement", + "date", + "scheme_Type", + "scheme_Name", + "ID", + "head", + "pressure", + "actualdemand", + "demanddeficit", + "totalExternalOutflow", + "quality", + ], + ) writer.writeheader() writer.writerows(node_rows) print(f"Data exported to {csv_filename_link} and {csv_filename_node} successfully.") client.close() -def upload_cleaned_SCADA_data_to_influxdb(file_path: str, bucket: str="SCADA_data") -> None: +def upload_cleaned_SCADA_data_to_influxdb( + file_path: str, bucket: str = "SCADA_data" +) -> None: """ 将清洗后的SCADA数据导入influxdb,有标准化导入格式 :param file_path: 导入数据的文件 @@ -3437,32 +4378,42 @@ def upload_cleaned_SCADA_data_to_influxdb(file_path: str, bucket: str="SCADA_dat """ data_list = [] - with open(file_path, mode='r', encoding='utf-8-sig') as csv_file: + with open(file_path, mode="r", encoding="utf-8-sig") as csv_file: csv_reader = csv.DictReader(csv_file) for row in csv_reader: # 解析日期和时间字段 - datetime_value = datetime.strptime(row['time'], '%Y-%m-%d %H:%M:%S%z') + datetime_value = datetime.strptime(row["time"], "%Y-%m-%d %H:%M:%S%z") # 处理datacleaning_value为空的情况 - datacleaning_value = float(row['datacleaning_value']) if row['datacleaning_value'] else None + datacleaning_value = ( + float(row["datacleaning_value"]) if row["datacleaning_value"] else None + ) # 处理monitored_value字段类型错误 try: - monitored_value = float(row['monitored_value']) if row['monitored_value'] else None + monitored_value = ( + float(row["monitored_value"]) if row["monitored_value"] else None + ) except ValueError: monitored_value = None # 如果转换失败,则设为None(或其他适当的默认值) - data_list.append({ - 'measurement': row['measurement'], - 'device_ID': row['device_ID'], - 'date': datetime_value.strftime('%Y-%m-%d'), - 'description': row['description'], - 'monitored_value': monitored_value, - 'datacleaning_value': datacleaning_value, - 'datetime': datetime_value - }) + data_list.append( + { + "measurement": row["measurement"], + "device_ID": row["device_ID"], + "date": datetime_value.strftime("%Y-%m-%d"), + "description": row["description"], + "monitored_value": monitored_value, + "datacleaning_value": datacleaning_value, + "datetime": datetime_value, + } + ) client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to 
InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) try: write_api = client.write_api(write_options=SYNCHRONOUS) @@ -3471,13 +4422,15 @@ def upload_cleaned_SCADA_data_to_influxdb(file_path: str, bucket: str="SCADA_dat print(data) # 创建Point对象 point = ( - Point(data['measurement']) # measurement为mpointName - .tag("device_ID", data['device_ID']) # tag key为mpointId - .tag("date", data['date']) # 具体日期tag,方便查询 - .tag('description', data['description']) - .field("monitored_value", data['monitored_value']) # field key为dataValue - .field('datacleaning_value', data['datacleaning_value']) - .time(data['datetime']) # 时间以datetime为准 + Point(data["measurement"]) # measurement为mpointName + .tag("device_ID", data["device_ID"]) # tag key为mpointId + .tag("date", data["date"]) # 具体日期tag,方便查询 + .tag("description", data["description"]) + .field( + "monitored_value", data["monitored_value"] + ) # field key为dataValue + .field("datacleaning_value", data["datacleaning_value"]) + .time(data["datetime"]) # 时间以datetime为准 ) write_api.write(bucket=bucket, record=point) @@ -3487,10 +4440,11 @@ def upload_cleaned_SCADA_data_to_influxdb(file_path: str, bucket: str="SCADA_dat except Exception as e: print(f"未知错误: {str(e)}") finally: - if 'write_api' in locals(): + if "write_api" in locals(): write_api.close() client.close() + # 2025/05/05 DingZQ # 删除某一天的数据 def delete_data(delete_date: str, bucket: str) -> None: @@ -3502,18 +4456,32 @@ def delete_data(delete_date: str, bucket: str) -> None: """ client = get_new_client() if not client.ping(): - print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))) + print( + "{} -- Failed to connect to InfluxDB.".format( + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ) + ) - start_time = (datetime.strptime(delete_date, "%Y-%m-%d") - timedelta(days=1)).replace(hour=16, minute=0, second=0, tzinfo=timezone.utc).isoformat() - stop_time = datetime.strptime(delete_date, "%Y-%m-%d").replace(hour=15, minute=59, second=59, tzinfo=timezone.utc).isoformat() + start_time = ( + (datetime.strptime(delete_date, "%Y-%m-%d") - timedelta(days=1)) + .replace(hour=16, minute=0, second=0, tzinfo=timezone.utc) + .isoformat() + ) + stop_time = ( + datetime.strptime(delete_date, "%Y-%m-%d") + .replace(hour=15, minute=59, second=59, tzinfo=timezone.utc) + .isoformat() + ) # 构造删除谓词(InfluxDB Delete API 要求的 SQL-like 语句) # 注意:字段名用 _field,measurement 用 _measurement,标签直接写标签名 predicate = f'date="{delete_date}"' delete_api: DeleteApi = client.delete_api() - delete_api.delete(start=start_time, stop=stop_time, predicate=predicate, bucket=bucket) - + delete_api.delete( + start=start_time, stop=stop_time, predicate=predicate, bucket=bucket + ) + # 示例调用 if __name__ == "__main__": @@ -3529,13 +4497,11 @@ if __name__ == "__main__": # except Exception as e: # print(f"连接失败: {e}") - # step2: 先查询pg数据库中scada_info的信息,然后存储SCADA数据到SCADA_data这个bucket里 # query_pg_scada_info_realtime('bb') # query_pg_scada_info_non_realtime('bb') # query_corresponding_query_id_and_element_id('bb') - # 手动执行存储测试 # 示例1:store_realtime_SCADA_data_to_influxdb # store_realtime_SCADA_data_to_influxdb(get_real_value_time='2025-03-16T11:13:00+08:00') @@ -3642,7 +4608,7 @@ if __name__ == "__main__": # print(leakage) # 示例:upload_cleaned_SCADA_data_to_influxdb - upload_cleaned_SCADA_data_to_influxdb(file_path='./标准cleaned_demand_data.csv') + 
upload_cleaned_SCADA_data_to_influxdb(file_path="./标准cleaned_demand_data.csv")
 
     # 示例:delete_data
     # delete_data(delete_date='2025-05-04', bucket='SCADA_data')
@@ -3651,6 +4617,3 @@ if __name__ == "__main__":
     # result = query_cleaned_SCADA_data_by_device_ID_and_timerange(query_ids_list=['9485'], start_time='2024-03-24T00:00:00+08:00',
     #                                                              end_time='2024-03-26T23:59:00+08:00')
     # print(result)
-
-
-
diff --git a/main.py b/main.py
index 6889d9d..b658bdc 100644
--- a/main.py
+++ b/main.py
@@ -4030,9 +4030,9 @@ if __name__ == "__main__":
     # uvicorn.run(app, host="0.0.0.0", port=8000)
     # url='http://127.0.0.1:8000/valve_close_analysis?network=beibeizone&start_time=2024-04-01T08:00:00Z&valve_IDs=GSD2307192058577780A3287D78&valve_IDs=GSD2307192058572E953B707226(S2)&duration=1800'
     # url='http://127.0.0.1:8000/burst_analysis?network=beibeizone&start_time=2024-04-01T08:00:00Z&burst_ID=ZBBGXSZW000001&duration=1800'
-    # url = "http://192.168.1.36:8000/queryallschemeallrecords/?schemename=Fangan0817114448&querydate=2025-08-13&schemetype=burst_Analysis"
+    url = "http://192.168.1.36:8000/queryallschemeallrecords/?schemename=Fangan0817114448&querydate=2025-08-13&schemetype=burst_Analysis"
     # response = Request.get(url)
 
-    # import requests
+    import requests
 
-    # response = requests.get(url)
+    response = requests.get(url)
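# delete_data above removes points whose 'date' tag matches, scoped to that
# Beijing day's UTC window. The DeleteApi call in isolation, with placeholder
# connection values:
from influxdb_client import InfluxDBClient

with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    client.delete_api().delete(
        start="2025-05-03T16:00:00Z",
        stop="2025-05-04T15:59:59Z",
        predicate='date="2025-05-04"',
        bucket="SCADA_data",
        org="my-org",
    )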
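# The request enabled at the bottom of main.py runs with no timeout or status
# check. A more defensive sketch of the same call, using the URL from the patch:
import requests

url = "http://192.168.1.36:8000/queryallschemeallrecords/?schemename=Fangan0817114448&querydate=2025-08-13&schemetype=burst_Analysis"
try:
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    print(response.json())
except requests.RequestException as exc:
    print("Request failed:", exc)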