统一scheme_type命名

This commit is contained in:
2026-02-05 15:39:56 +08:00
parent a85ff8e215
commit 6434cae21c
6 changed files with 72 additions and 72 deletions

View File

@@ -261,7 +261,7 @@ def valve_close_analysis(
modify_pattern_start_time=modify_pattern_start_time,
modify_total_duration=modify_total_duration,
modify_valve_opening=modify_valve_opening,
scheme_Type="valve_close_Analysis",
scheme_type="valve_close_Analysis",
scheme_name=scheme_name,
)
# step 3. restore the base model
@@ -392,7 +392,7 @@ def flushing_analysis(
modify_pattern_start_time=modify_pattern_start_time,
modify_total_duration=modify_total_duration,
modify_valve_opening=modify_valve_opening,
scheme_Type="flushing_Analysis",
scheme_type="flushing_analysis",
scheme_name=scheme_name,
)
# step 4. restore the base model
@@ -711,7 +711,7 @@ def pressure_regulation(
modify_tank_initial_level=modify_tank_initial_level,
modify_fixed_pump_pattern=modify_fixed_pump_pattern,
modify_variable_pump_pattern=modify_variable_pump_pattern,
scheme_Type="pressure_regulation",
scheme_type="pressure_regulation",
scheme_name=scheme_name,
)
if is_project_open(new_name):

View File

@@ -316,7 +316,7 @@ async def fastapi_query_all_scheme_all_records(
return loaded_dict
results = influxdb_api.query_scheme_all_record(
scheme_Type=schemetype, scheme_name=schemename, query_date=querydate
scheme_type=schemetype, scheme_name=schemename, query_date=querydate
)
packed = msgpack.packb(results, default=encode_datetime)
redis_client.set(cache_key, packed)
@@ -334,7 +334,7 @@ async def fastapi_query_all_scheme_all_records_property(
all_results = msgpack.unpackb(data, object_hook=decode_datetime)
else:
all_results = influxdb_api.query_scheme_all_record(
scheme_Type=schemetype, scheme_name=schemename, query_date=querydate
scheme_type=schemetype, scheme_name=schemename, query_date=querydate
)
packed = msgpack.packb(all_results, default=encode_datetime)
redis_client.set(cache_key, packed)

View File

@@ -404,7 +404,7 @@ def create_and_initialize_buckets(org_name: str) -> None:
Point("link")
.tag("date", None)
.tag("ID", None)
.tag("scheme_Type", None)
.tag("scheme_type", None)
.tag("scheme_name", None)
.field("flow", 0.0)
.field("leakage", 0.0)
@@ -420,7 +420,7 @@ def create_and_initialize_buckets(org_name: str) -> None:
Point("node")
.tag("date", None)
.tag("ID", None)
.tag("scheme_Type", None)
.tag("scheme_type", None)
.tag("scheme_name", None)
.field("head", 0.0)
.field("pressure", 0.0)
@@ -436,7 +436,7 @@ def create_and_initialize_buckets(org_name: str) -> None:
.tag("date", None)
.tag("description", None)
.tag("device_ID", None)
.tag("scheme_Type", None)
.tag("scheme_type", None)
.tag("scheme_name", None)
.field("monitored_value", 0.0)
.field("datacleaning_value", 0.0)
@@ -1811,7 +1811,7 @@ def query_SCADA_data_by_device_ID_and_time(
def query_scheme_SCADA_data_by_device_ID_and_time(
query_ids_list: List[str],
query_time: str,
scheme_Type: str,
scheme_type: str,
scheme_name: str,
bucket: str = "scheme_simulation_result",
) -> Dict[str, float]:
@@ -1843,7 +1843,7 @@ def query_scheme_SCADA_data_by_device_ID_and_time(
flux_query = f"""
from(bucket: "{bucket}")
|> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()})
|> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "monitored_value" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}")
|> filter(fn: (r) => r["device_ID"] == "{device_id}" and r["_field"] == "monitored_value" and r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}")
"""
# 执行查询
try:
@@ -3227,7 +3227,7 @@ def store_scheme_simulation_result_to_influxdb(
link_result_list: List[Dict[str, any]],
scheme_start_time: str,
num_periods: int = 1,
scheme_Type: str = None,
scheme_type: str = None,
scheme_name: str = None,
bucket: str = "scheme_simulation_result",
):
@@ -3237,7 +3237,7 @@ def store_scheme_simulation_result_to_influxdb(
:param link_result_list: (List[Dict[str, any]]): 包含连接和结果数据的字典列表。
:param scheme_start_time: (str): 方案模拟开始时间。
:param num_periods: (int): 方案模拟的周期数
:param scheme_Type: (str): 方案类型
:param scheme_type: (str): 方案类型
:param scheme_name: (str): 方案名称
:param bucket: (str): InfluxDB 的 bucket 名称,默认值为 "scheme_simulation_result"
:return:
@@ -3298,7 +3298,7 @@ def store_scheme_simulation_result_to_influxdb(
Point("node")
.tag("date", date_str)
.tag("ID", node_id)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("head", data.get("head", 0.0))
.field("pressure", data.get("pressure", 0.0))
@@ -3322,7 +3322,7 @@ def store_scheme_simulation_result_to_influxdb(
Point("link")
.tag("date", date_str)
.tag("ID", link_id)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("flow", data.get("flow", 0.0))
.field("velocity", data.get("velocity", 0.0))
@@ -3409,13 +3409,13 @@ def query_corresponding_query_id_and_element_id(name: str) -> None:
# 2025/03/11
def fill_scheme_simulation_result_to_SCADA(
scheme_Type: str = None,
scheme_type: str = None,
scheme_name: str = None,
query_date: str = None,
bucket: str = "scheme_simulation_result",
):
"""
:param scheme_Type: 方案类型
:param scheme_type: 方案类型
:param scheme_name: 方案名称
:param query_date: 查询日期,格式为 'YYYY-MM-DD'
:param bucket: InfluxDB 的 bucket 名称,默认值为 "scheme_simulation_result"
@@ -3457,7 +3457,7 @@ def fill_scheme_simulation_result_to_SCADA(
# 查找associated_element_id的对应值
for key, value in globals.scheme_source_outflow_ids.items():
scheme_source_outflow_result = query_scheme_curve_by_ID_property(
scheme_Type=scheme_Type,
scheme_type=scheme_type,
scheme_name=scheme_name,
query_date=query_date,
ID=value,
@@ -3470,7 +3470,7 @@ def fill_scheme_simulation_result_to_SCADA(
Point("scheme_source_outflow")
.tag("date", query_date)
.tag("device_ID", key)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("monitored_value", data["value"])
.time(data["time"], write_precision="s")
@@ -3480,7 +3480,7 @@ def fill_scheme_simulation_result_to_SCADA(
for key, value in globals.scheme_pipe_flow_ids.items():
scheme_pipe_flow_result = query_scheme_curve_by_ID_property(
scheme_Type=scheme_Type,
scheme_type=scheme_type,
scheme_name=scheme_name,
query_date=query_date,
ID=value,
@@ -3492,7 +3492,7 @@ def fill_scheme_simulation_result_to_SCADA(
Point("scheme_pipe_flow")
.tag("date", query_date)
.tag("device_ID", key)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("monitored_value", data["value"])
.time(data["time"], write_precision="s")
@@ -3502,7 +3502,7 @@ def fill_scheme_simulation_result_to_SCADA(
for key, value in globals.scheme_pressure_ids.items():
scheme_pressure_result = query_scheme_curve_by_ID_property(
scheme_Type=scheme_Type,
scheme_type=scheme_type,
scheme_name=scheme_name,
query_date=query_date,
ID=value,
@@ -3514,7 +3514,7 @@ def fill_scheme_simulation_result_to_SCADA(
Point("scheme_pressure")
.tag("date", query_date)
.tag("device_ID", key)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("monitored_value", data["value"])
.time(data["time"], write_precision="s")
@@ -3524,7 +3524,7 @@ def fill_scheme_simulation_result_to_SCADA(
for key, value in globals.scheme_demand_ids.items():
scheme_demand_result = query_scheme_curve_by_ID_property(
scheme_Type=scheme_Type,
scheme_type=scheme_type,
scheme_name=scheme_name,
query_date=query_date,
ID=value,
@@ -3536,7 +3536,7 @@ def fill_scheme_simulation_result_to_SCADA(
Point("scheme_demand")
.tag("date", query_date)
.tag("device_ID", key)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("monitored_value", data["value"])
.time(data["time"], write_precision="s")
@@ -3546,7 +3546,7 @@ def fill_scheme_simulation_result_to_SCADA(
for key, value in globals.scheme_quality_ids.items():
scheme_quality_result = query_scheme_curve_by_ID_property(
scheme_Type=scheme_Type,
scheme_type=scheme_type,
scheme_name=scheme_name,
query_date=query_date,
ID=value,
@@ -3558,7 +3558,7 @@ def fill_scheme_simulation_result_to_SCADA(
Point("scheme_quality")
.tag("date", query_date)
.tag("device_ID", key)
.tag("scheme_Type", scheme_Type)
.tag("scheme_type", scheme_type)
.tag("scheme_name", scheme_name)
.field("monitored_value", data["value"])
.time(data["time"], write_precision="s")
@@ -3629,14 +3629,14 @@ def query_SCADA_data_curve(
# 2025/02/18
def query_scheme_all_record_by_time(
scheme_Type: str,
scheme_type: str,
scheme_name: str,
query_time: str,
bucket: str = "scheme_simulation_result",
) -> tuple:
"""
查询指定方案某一时刻的所有记录(包括node、link),分别以指定格式返回。
:param scheme_Type: 方案类型
:param scheme_type: 方案类型
:param scheme_name: 方案名称
:param query_time: 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'
:param bucket: 数据存储的 bucket 名称。
@@ -3660,7 +3660,7 @@ def query_scheme_all_record_by_time(
flux_query = f"""
from(bucket: "{bucket}")
|> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()})
|> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}" and r["_measurement"] == "node" or r["_measurement"] == "link")
|> filter(fn: (r) => r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}" and r["_measurement"] == "node" or r["_measurement"] == "link")
|> pivot(
rowKey:["_time"],
columnKey:["_field"],
@@ -3710,7 +3710,7 @@ def query_scheme_all_record_by_time(
# 2025/03/04
def query_scheme_all_record_by_time_property(
scheme_Type: str,
scheme_type: str,
scheme_name: str,
query_time: str,
type: str,
@@ -3719,7 +3719,7 @@ def query_scheme_all_record_by_time_property(
) -> list:
"""
查询指定方案某一时刻node、link某一属性值,以指定格式返回。
:param scheme_Type: 方案类型
:param scheme_type: 方案类型
:param scheme_name: 方案名称
:param query_time: 输入的北京时间,格式为 '2024-11-24T17:30:00+08:00'
:param type: 查询的类型(决定 measurement)
@@ -3752,7 +3752,7 @@ def query_scheme_all_record_by_time_property(
flux_query = f"""
from(bucket: "{bucket}")
|> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()})
|> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}")
|> filter(fn: (r) => r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}")
"""
# 执行查询
tables = query_api.query(flux_query)
@@ -3767,7 +3767,7 @@ def query_scheme_all_record_by_time_property(
# 2025/02/19
def query_scheme_curve_by_ID_property(
scheme_Type: str,
scheme_type: str,
scheme_name: str,
query_date: str,
ID: str,
@@ -3777,7 +3777,7 @@ def query_scheme_curve_by_ID_property(
) -> list:
"""
根据scheme_Type和scheme_name,查询该模拟方案中某一node或link的某一属性值的所有时间的结果
:param scheme_Type: 方案类型
:param scheme_type: 方案类型
:param scheme_name: 方案名称
:param query_date: 查询日期,格式为 'YYYY-MM-DD'
:param ID: 元素的ID
@@ -3817,7 +3817,7 @@ def query_scheme_curve_by_ID_property(
flux_query = f"""
from(bucket: "{bucket}")
|> range(start: {start_time}, stop: {stop_time})
|> filter(fn: (r) => r["_measurement"] == "{measurement}" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}" and r["ID"] == "{ID}" and r["_field"] == "{property}")
|> filter(fn: (r) => r["_measurement"] == "{measurement}" and r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}" and r["ID"] == "{ID}" and r["_field"] == "{property}")
"""
# 执行查询
tables = query_api.query(flux_query)
@@ -3832,14 +3832,14 @@ def query_scheme_curve_by_ID_property(
# 2025/02/21
def query_scheme_all_record(
scheme_Type: str,
scheme_type: str,
scheme_name: str,
query_date: str,
bucket: str = "scheme_simulation_result",
) -> tuple:
"""
查询指定方案的所有记录(包括node、link),分别以指定格式返回。
:param scheme_Type: 方案类型
:param scheme_type: 方案类型
:param scheme_name: 方案名称
:param query_date: 查询日期,格式为 'YYYY-MM-DD'
:param bucket: 数据存储的 bucket 名称。
@@ -3867,7 +3867,7 @@ def query_scheme_all_record(
flux_query = f"""
from(bucket: "{bucket}")
|> range(start: {utc_start_time.isoformat()}, stop: {utc_stop_time.isoformat()})
|> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}" and r["_measurement"] == "node" or r["_measurement"] == "link")
|> filter(fn: (r) => r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}" and r["_measurement"] == "node" or r["_measurement"] == "link")
|> pivot(
rowKey:["_time"],
columnKey:["_field"],
@@ -3917,7 +3917,7 @@ def query_scheme_all_record(
# 2025/03/04
def query_scheme_all_record_property(
scheme_Type: str,
scheme_type: str,
scheme_name: str,
query_date: str,
type: str,
@@ -3926,7 +3926,7 @@ def query_scheme_all_record_property(
) -> list:
"""
查询指定方案的node、link的某一属性值,以指定格式返回。
:param scheme_Type: 方案类型
:param scheme_type: 方案类型
:param scheme_name: 方案名称
:param query_date: 查询日期,格式为 'YYYY-MM-DD'
:param type: 查询的类型(决定 measurement)
@@ -3964,7 +3964,7 @@ def query_scheme_all_record_property(
flux_query = f"""
from(bucket: "{bucket}")
|> range(start: {start_time}, stop: {stop_time})
|> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}" and r["date"] == "{query_date}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}")
|> filter(fn: (r) => r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}" and r["date"] == "{query_date}" and r["_measurement"] == "{measurement}" and r["_field"] == "{property}")
"""
# 执行查询
tables = query_api.query(flux_query)
@@ -4245,7 +4245,7 @@ def export_scheme_simulation_result_to_csv_time(
link_data[key][field] = record.get_value()
link_data[key]["measurement"] = record.get_measurement()
link_data[key]["date"] = record.values.get("date", None)
link_data[key]["scheme_Type"] = record.values.get("scheme_Type", None)
link_data[key]["scheme_type"] = record.values.get("scheme_type", None)
link_data[key]["scheme_name"] = record.values.get("scheme_name", None)
# 构建 Flux 查询语句,查询指定时间范围内的数据
flux_query_node = f"""
@@ -4267,7 +4267,7 @@ def export_scheme_simulation_result_to_csv_time(
node_data[key][field] = record.get_value()
node_data[key]["measurement"] = record.get_measurement()
node_data[key]["date"] = record.values.get("date", None)
node_data[key]["scheme_Type"] = record.values.get("scheme_Type", None)
node_data[key]["scheme_type"] = record.values.get("scheme_type", None)
node_data[key]["scheme_name"] = record.values.get("scheme_name", None)
for key in set(link_data.keys()):
row = {"time": key[0], "ID": key[1]}
@@ -4288,7 +4288,7 @@ def export_scheme_simulation_result_to_csv_time(
"time",
"measurement",
"date",
"scheme_Type",
"scheme_type",
"scheme_name",
"ID",
"flow",
@@ -4311,7 +4311,7 @@ def export_scheme_simulation_result_to_csv_time(
"time",
"measurement",
"date",
"scheme_Type",
"scheme_type",
"scheme_name",
"ID",
"head",
@@ -4330,14 +4330,14 @@ def export_scheme_simulation_result_to_csv_time(
# 2025/02/18
def export_scheme_simulation_result_to_csv_scheme(
scheme_Type: str,
scheme_type: str,
scheme_name: str,
query_date: str,
bucket: str = "scheme_simulation_result",
) -> None:
"""
导出influxdb中scheme_simulation_result这个bucket的数据到csv中
:param scheme_Type: 查询的方案类型
:param scheme_type: 查询的方案类型
:param scheme_name: 查询的方案名
:param query_date: 查询日期,格式为 'YYYY-MM-DD'
:param bucket: 数据存储的 bucket 名称,默认值为 "SCADA_data"
@@ -4366,7 +4366,7 @@ def export_scheme_simulation_result_to_csv_scheme(
flux_query_link = f"""
from(bucket: "{bucket}")
|> range(start: {start_time}, stop: {stop_time})
|> filter(fn: (r) => r["_measurement"] == "link" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}")
|> filter(fn: (r) => r["_measurement"] == "link" and r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}")
"""
# 执行查询
link_tables = query_api.query(flux_query_link)
@@ -4382,13 +4382,13 @@ def export_scheme_simulation_result_to_csv_scheme(
link_data[key][field] = record.get_value()
link_data[key]["measurement"] = record.get_measurement()
link_data[key]["date"] = record.values.get("date", None)
link_data[key]["scheme_Type"] = record.values.get("scheme_Type", None)
link_data[key]["scheme_type"] = record.values.get("scheme_type", None)
link_data[key]["scheme_name"] = record.values.get("scheme_name", None)
# 构建 Flux 查询语句,查询指定时间范围内的数据
flux_query_node = f"""
from(bucket: "{bucket}")
|> range(start: {start_time}, stop: {stop_time})
|> filter(fn: (r) => r["_measurement"] == "node" and r["scheme_Type"] == "{scheme_Type}" and r["scheme_name"] == "{scheme_name}")
|> filter(fn: (r) => r["_measurement"] == "node" and r["scheme_type"] == "{scheme_type}" and r["scheme_name"] == "{scheme_name}")
"""
# 执行查询
node_tables = query_api.query(flux_query_node)
@@ -4404,7 +4404,7 @@ def export_scheme_simulation_result_to_csv_scheme(
node_data[key][field] = record.get_value()
node_data[key]["measurement"] = record.get_measurement()
node_data[key]["date"] = record.values.get("date", None)
node_data[key]["scheme_Type"] = record.values.get("scheme_Type", None)
node_data[key]["scheme_type"] = record.values.get("scheme_type", None)
node_data[key]["scheme_name"] = record.values.get("scheme_name", None)
for key in set(link_data.keys()):
row = {"time": key[0], "ID": key[1]}
@@ -4416,10 +4416,10 @@ def export_scheme_simulation_result_to_csv_scheme(
node_rows.append(row)
# 动态生成 CSV 文件名
csv_filename_link = (
f"scheme_simulation_link_result_{scheme_name}_of_{scheme_Type}.csv"
f"scheme_simulation_link_result_{scheme_name}_of_{scheme_type}.csv"
)
csv_filename_node = (
f"scheme_simulation_node_result_{scheme_name}_of_{scheme_Type}.csv"
f"scheme_simulation_node_result_{scheme_name}_of_{scheme_type}.csv"
)
# 写入到 CSV 文件
with open(csv_filename_link, mode="w", newline="") as file:
@@ -4429,7 +4429,7 @@ def export_scheme_simulation_result_to_csv_scheme(
"time",
"measurement",
"date",
"scheme_Type",
"scheme_type",
"scheme_name",
"ID",
"flow",
@@ -4452,7 +4452,7 @@ def export_scheme_simulation_result_to_csv_scheme(
"time",
"measurement",
"date",
"scheme_Type",
"scheme_type",
"scheme_name",
"ID",
"head",
@@ -4878,15 +4878,15 @@ if __name__ == "__main__":
# export_scheme_simulation_result_to_csv_time(start_date='2025-02-13', end_date='2025-02-15')
# 示例9:export_scheme_simulation_result_to_csv_scheme
# export_scheme_simulation_result_to_csv_scheme(scheme_Type='burst_Analysis', scheme_name='scheme1', query_date='2025-03-10')
# export_scheme_simulation_result_to_csv_scheme(scheme_type='burst_Analysis', scheme_name='scheme1', query_date='2025-03-10')
# 示例10:query_scheme_all_record_by_time
# node_records, link_records = query_scheme_all_record_by_time(scheme_Type='burst_Analysis', scheme_name='scheme1', query_time="2025-02-14T10:30:00+08:00")
# node_records, link_records = query_scheme_all_record_by_time(scheme_type='burst_Analysis', scheme_name='scheme1', query_time="2025-02-14T10:30:00+08:00")
# print("Node 数据:", node_records)
# print("Link 数据:", link_records)
# 示例11:query_scheme_curve_by_ID_property
# curve_result = query_scheme_curve_by_ID_property(scheme_Type='burst_Analysis', scheme_name='scheme1', ID='ZBBDTZDP000022',
# curve_result = query_scheme_curve_by_ID_property(scheme_type='burst_Analysis', scheme_name='scheme1', ID='ZBBDTZDP000022',
# type='node', property='head')
# print(curve_result)
@@ -4896,7 +4896,7 @@ if __name__ == "__main__":
# print("Link 数据:", link_records)
# 示例13:query_scheme_all_record
# node_records, link_records = query_scheme_all_record(scheme_Type='burst_Analysis', scheme_name='scheme1', query_date='2025-03-10')
# node_records, link_records = query_scheme_all_record(scheme_type='burst_Analysis', scheme_name='scheme1', query_date='2025-03-10')
# print("Node 数据:", node_records)
# print("Link 数据:", link_records)
@@ -4909,16 +4909,16 @@ if __name__ == "__main__":
# print(result_records)
# 示例16:query_scheme_all_record_by_time_property
# result_records = query_scheme_all_record_by_time_property(scheme_Type='burst_Analysis', scheme_name='scheme1',
# result_records = query_scheme_all_record_by_time_property(scheme_type='burst_Analysis', scheme_name='scheme1',
# query_time='2025-02-14T10:30:00+08:00', type='node', property='head')
# print(result_records)
# 示例17:query_scheme_all_record_property
# result_records = query_scheme_all_record_property(scheme_Type='burst_Analysis', scheme_name='scheme1', query_date='2025-03-10', type='node', property='head')
# result_records = query_scheme_all_record_property(scheme_type='burst_Analysis', scheme_name='scheme1', query_date='2025-03-10', type='node', property='head')
# print(result_records)
# 示例18:fill_scheme_simulation_result_to_SCADA
# fill_scheme_simulation_result_to_SCADA(scheme_Type='burst_Analysis', scheme_name='burst0330', query_date='2025-03-30')
# fill_scheme_simulation_result_to_SCADA(scheme_type='burst_Analysis', scheme_name='burst0330', query_date='2025-03-30')
# 示例19:query_SCADA_data_by_device_ID_and_timerange
# result = query_SCADA_data_by_device_ID_and_timerange(query_ids_list=globals.pressure_non_realtime_ids, start_time='2025-04-16T00:00:00+08:00',
@@ -4926,7 +4926,7 @@ if __name__ == "__main__":
# print(result)
# 示例:manually_get_burst_flow
# leakage = manually_get_burst_flow(scheme_Type='burst_Analysis', scheme_name='burst_scheme', scheme_start_time='2025-03-10T12:00:00+08:00')
# leakage = manually_get_burst_flow(scheme_type='burst_Analysis', scheme_name='burst_scheme', scheme_start_time='2025-03-10T12:00:00+08:00')
# print(leakage)
# 示例:upload_cleaned_SCADA_data_to_influxdb

View File

@@ -1249,7 +1249,7 @@ def run_simulation(
endtime = time.time()
logging.info("store time: %f", endtime - starttime)
# 暂不需要再次存储 SCADA 模拟信息
# TimescaleInternalQueries.fill_scheme_simulation_result_to_SCADA(scheme_Type=scheme_Type, scheme_name=scheme_name)
# TimescaleInternalQueries.fill_scheme_simulation_result_to_SCADA(scheme_type=scheme_type, scheme_name=scheme_name)
# if simulation_type.upper() == "REALTIME":
# influxdb_api.store_realtime_simulation_result_to_influxdb(
@@ -1261,11 +1261,11 @@ def run_simulation(
# link_result,
# modify_pattern_start_time,
# num_periods_result,
# scheme_Type,
# scheme_type,
# scheme_name,
# )
# 暂不需要再次存储 SCADA 模拟信息
# influxdb_api.fill_scheme_simulation_result_to_SCADA(scheme_Type=scheme_Type, scheme_name=scheme_name)
# influxdb_api.fill_scheme_simulation_result_to_SCADA(scheme_type=scheme_type, scheme_name=scheme_name)
print("after store result")
@@ -1345,7 +1345,7 @@ if __name__ == "__main__":
# run_simulation(name='bb', simulation_type="realtime", modify_pattern_start_time='2025-02-25T23:45:00+08:00')
# 模拟示例2
# run_simulation(name='bb', simulation_type="extended", modify_pattern_start_time='2025-03-10T12:00:00+08:00',
# modify_total_duration=1800, scheme_Type="burst_Analysis", scheme_name="scheme1")
# modify_total_duration=1800, scheme_type="burst_Analysis", scheme_name="scheme1")
# 查询示例1:query_SCADA_ID_corresponding_info
# result = query_SCADA_ID_corresponding_info(name='bb', SCADA_ID='P10755')

View File

@@ -3233,7 +3233,7 @@ async def fastapi_query_all_scheme_all_records(
return loaded_dict
results = influxdb_api.query_scheme_all_record(
scheme_Type=schemetype, scheme_name=schemename, query_date=querydate
scheme_type=schemetype, scheme_name=schemename, query_date=querydate
)
packed = msgpack.packb(results, default=encode_datetime)
redis_client.set(cache_key, packed)
@@ -3257,7 +3257,7 @@ async def fastapi_query_all_scheme_all_records_property(
all_results = msgpack.unpackb(data, object_hook=decode_datetime)
else:
all_results = influxdb_api.query_scheme_all_record(
scheme_Type=schemetype, scheme_name=schemename, query_date=querydate
scheme_type=schemetype, scheme_name=schemename, query_date=querydate
)
packed = msgpack.packb(all_results, default=encode_datetime)
redis_client.set(cache_key, packed)

View File

@@ -396,7 +396,7 @@ def flushing_analysis(
modify_pattern_start_time=modify_pattern_start_time,
modify_total_duration=modify_total_duration,
modify_valve_opening=modify_valve_opening,
scheme_Type="flushing_Analysis",
scheme_type="flushing_Analysis",
scheme_name=scheme_name,
)
# step 4. restore the base model
@@ -533,7 +533,7 @@ def contaminant_simulation(
simulation_type="extended",
modify_pattern_start_time=modify_pattern_start_time,
modify_total_duration=modify_total_duration,
scheme_Type="contaminant_Analysis",
scheme_type="contaminant_Analysis",
scheme_name=scheme_name,
)
@@ -692,7 +692,7 @@ def pressure_regulation(
modify_tank_initial_level=modify_tank_initial_level,
modify_fixed_pump_pattern=modify_fixed_pump_pattern,
modify_variable_pump_pattern=modify_variable_pump_pattern,
scheme_Type="pressure_regulation",
scheme_type="pressure_regulation",
scheme_name=scheme_name,
)
if is_project_open(new_name):