Files
TJWaterServer/api_ex/burst_locate_SCADA.py
2025-12-31 16:11:28 +08:00

2409 lines
111 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
import networkx
import pandas
import wntr
import numpy as np
import pandas as pd
import math
import copy
import sys
import networkx as nx
import pymetis
import influxdb_api
from datetime import datetime
import random
import itertools
import os
from tjnetwork import *
from api.project_backup import CopyProjectEx
import simulation
import globals
import influxdb_api
import concurrent.futures
from datetime import datetime, timedelta, timezone
import matplotlib.pyplot as plt
def manually_get_burst_time_SCADA_pressure(name: str, manually_burst_time: str, scheme_Type: str, scheme_Name: str):
    """
    Fetch the SCADA pressure data matching a manually simulated pipe burst.

    :param name: database name
    :param manually_burst_time: manual burst-simulation time, format '2024-11-24T17:30:00+08:00'
    :param scheme_Type: scheme type
    :param scheme_Name: scheme name
    :return: (burst_SCADA_pressure, pressure_SCADA_IDs_list, normal_SCADA_pressure) --
        burst-time and normal-time pressures as pd.Series re-keyed by element ID,
        plus the raw sensor-ID list passed to the scheme query.
    """
    influxdb_api.query_corresponding_query_id_and_element_id(name=name)
    pressure_api_query_ids_list = list(globals.scheme_pressure_ids.keys())
    # print(pressure_api_query_ids_list)
    # Node names serve as the SCADA pressure-monitor names.
    # NOTE(review): this wraps the values in a ONE-ELEMENT NESTED list ([[...]]);
    # elsewhere in this file pressure sensor IDs are a flat list -- confirm that
    # query_scheme_SCADA_data_by_device_ID_and_time really expects the nesting.
    pressure_SCADA_IDs_list = [list(globals.scheme_pressure_ids.values())]
    # print(pressure_SCADA_IDs_list)
    # pressure_SCADA_IDs_list replaces sensor_name
    # pressure_SCADA_IDs_list = list(pressure_SCADA_IDs_list)
    # normal_SCADA_pressure replaces predicted_p
    normal_SCADA_pressure = influxdb_api.query_SCADA_data_by_device_ID_and_time(
        query_ids_list=pressure_api_query_ids_list, query_time=manually_burst_time
    )
    # print(normal_SCADA_pressure)
    # Re-key each entry by its element ID from scheme_pressure_ids and convert
    # the value to a number multiplied by 100 (None values are kept as None).
    normal_SCADA_pressure = {
        globals.scheme_pressure_ids.get(k, k): (float(v) * 100 if v is not None else None)
        for k, v in normal_SCADA_pressure.items()
    }
    # print(normal_SCADA_pressure)
    normal_SCADA_pressure = pd.Series(normal_SCADA_pressure)
    # burst_SCADA_pressure replaces leak_monitored
    burst_SCADA_pressure = influxdb_api.query_scheme_SCADA_data_by_device_ID_and_time(
        query_ids_list=pressure_SCADA_IDs_list, query_time=manually_burst_time, scheme_Type=scheme_Type,
        scheme_Name=scheme_Name
    )
    # print(burst_SCADA_pressure)
    # Same re-keying; note the burst values are NOT scaled by 100 here.
    burst_SCADA_pressure = {
        globals.scheme_pressure_ids.get(k, k): (float(v) if v is not None else None)
        for k, v in burst_SCADA_pressure.items()
    }
    print(burst_SCADA_pressure)
    burst_SCADA_pressure = pd.Series(burst_SCADA_pressure)
    return burst_SCADA_pressure, pressure_SCADA_IDs_list, normal_SCADA_pressure
# 2025/03/22
def manually_get_burst_flow(scheme_Type: str, scheme_Name: str, burst_start_time: str,
                            bucket1: str="scheme_simulation_result", bucket2: str="SCADA_data") -> float:
    """
    For a manual burst simulation, estimate the burst flow as
    (simulated source outflow) - (normal SCADA source outflow) at the burst start.

    :param scheme_Type: scheme type
    :param scheme_Name: scheme name
    :param burst_start_time: scheme simulation start time, format '2024-11-24T17:30:00+08:00'
    :param bucket1: bucket holding the scheme simulation results.
    :param bucket2: bucket holding the SCADA data.
    :return: the leak flow in m3/s
    """
    client = influxdb_api.get_new_client()
    if not client.ping():
        print("{} -- Failed to connect to InfluxDB.".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    query_api = client.query_api()
    # Convert the Beijing timestamp to UTC and query a +/-1 s window around it.
    utc_time = datetime.fromisoformat(burst_start_time).astimezone(timezone.utc)
    range_start = (utc_time - timedelta(seconds=1)).isoformat()
    range_stop = (utc_time + timedelta(seconds=1)).isoformat()

    def _summed_values(flux_query, factor):
        # Sum every record value of the query result, scaled by `factor`.
        total = 0
        for table in query_api.query(flux_query):
            for record in table.records:
                total += float(record["_value"]) * factor
        return total

    # Simulated source outflow of the scheme (L/s converted to m3/h via * 3.6).
    scheme_sim_flow = _summed_values(f'''
    from(bucket: "{bucket1}")
    |> range(start: {range_start}, stop: {range_stop})
    |> filter(fn: (r) => r["scheme_Type"] == "{scheme_Type}" and r["scheme_Name"] == "{scheme_Name}" and r["_measurement"] == "scheme_source_outflow")
    ''', 3.6)
    # Measured (normal) SCADA source outflow, already in m3/h.
    scada_flow = _summed_values(f'''
    from(bucket: "{bucket2}")
    |> range(start: {range_start}, stop: {range_stop})
    |> filter(fn: (r) => r["_measurement"] == "source_outflow_realtime")
    ''', 1.0)
    client.close()
    # m3/h difference -> m3/s
    burst_flow = (scheme_sim_flow - scada_flow) / 3600
    return burst_flow
# 2025/03/25
def cal_if_detect(burst_SCADA_pressure: pd.Series, normal_SCADA_pressure: pd.Series, min_dpressure: float):
    """
    Decide whether burst localisation should be started.

    :param burst_SCADA_pressure: pressure readings at the burst timestep
    :param normal_SCADA_pressure: pressure readings at the normal timestep
    :param min_dpressure: pressure-drop threshold in metres
    :return: (if_detect, tag) -- if_detect is 1 when the largest observed drop
        reaches the threshold, 0 otherwise; tag is a human-readable explanation
    """
    dpressure = normal_SCADA_pressure - burst_SCADA_pressure
    print(dpressure)
    max_dpressure = dpressure.max()
    # Report which sensor saw the biggest drop (debug output).
    print(dpressure.idxmax())
    print('max_dpressure', max_dpressure, type(max_dpressure))
    print('min_dpressure', min_dpressure, type(min_dpressure))
    detected = max_dpressure >= min_dpressure
    if_detect = 1 if detected else 0
    tag = '压降在识别范围内,开始识别。' if detected else '爆管造成的的压力下降小于设定值,故不展开识别。'
    print(tag)
    return if_detect, tag
## Simulation Module
# load inp
def load_inp(name: str, before_burst_time: str) -> wntr.network.WaterNetworkModel:
    """
    Return a wntr model of the network in its normal (pre-burst) state.

    A temporary project copy named 'before_burst_<name>' is created, simulated at
    `before_burst_time`, exported to an INP file and loaded with wntr.

    :param name: database name
    :param before_burst_time: time just before the burst, format
        '2024-11-25T09:00:00+08:00'; for a manual simulation this equals the burst
        time, for an automatic one it is the previous hydraulic timestep
    :return: the loaded wntr WaterNetworkModel
    """
    temp_project = f'before_burst_{name}'
    # Remove any stale temporary project left over from a previous run.
    if have_project(temp_project):
        if is_project_open(temp_project):
            close_project(temp_project)
        delete_project(temp_project)
    CopyProjectEx()(name, temp_project,
                    ['operation', 'current_operation', 'restore_operation', 'batch_operation', 'operation_table'])
    open_project(temp_project)
    simulation.run_simulation(name=temp_project, simulation_type='manually_temporary',
                              modify_pattern_start_time=before_burst_time)
    # Close and drop the temporary project; the exported INP file stays on disk.
    if is_project_open(temp_project):
        close_project(temp_project)
    delete_project(temp_project)
    return wntr.network.WaterNetworkModel(f'./db_inp/{temp_project}.db.inp')
# load inp information
def read_inf_inp(wn):
    """
    Collect the node and pipe attributes needed for burst localisation.

    Candidate pipes are all pipes except those whose initial status is Closed.

    :param wn: wntr WaterNetworkModel
    :return: (all_node, node_elevation, node_coordinates, candidate_pipe_init,
              pipe_start_node, pipe_end_node, pipe_length, pipe_diameter)
    """
    all_node = wn.node_name_list
    node_elevation = wn.query_node_attribute('elevation')
    node_coordinates = wn.query_node_attribute('coordinates')
    all_pipe = wn.pipe_name_list
    # Initially-closed pipes (status == 0) cannot be burst candidates.
    closed_pipes = [p for p in all_pipe if wn.get_link(p).initial_status == 0]
    candidate_pipe_init = list(set(all_pipe) - set(closed_pipes))
    pipe_start_node = wn.query_link_attribute('start_node_name', link_type=wntr.network.model.Pipe)
    pipe_end_node = wn.query_link_attribute('end_node_name', link_type=wntr.network.model.Pipe)
    pipe_length = wn.query_link_attribute('length')
    pipe_diameter = wn.query_link_attribute('diameter')
    return all_node, node_elevation, node_coordinates, candidate_pipe_init, pipe_start_node, pipe_end_node, pipe_length, pipe_diameter
def read_inf_inp_other(wn):
    """Return every link name plus start/end node maps covering ALL link types."""
    link_names = wn.link_name_list
    start_nodes = wn.query_link_attribute('start_node_name')
    end_nodes = wn.query_link_attribute('end_node_name')
    return link_names, start_nodes, end_nodes
# 用新的漏损管道替换原管道
def simple_add_leak(wn, leak_mag, leak_pipe):
    """
    Replace `leak_pipe` with two half-length pipes joined at a new junction that
    carries the leak demand (pattern 'add_leak').

    :param wn: wntr WaterNetworkModel (modified in place)
    :param leak_mag: leak demand magnitude assigned to the inserted junction
    :param leak_pipe: name of the pipe on which the leak is simulated
    :return: (wn, whole_inf, add_pipe1) -- whole_inf stores everything
             simple_recover_wn() needs to undo the change
    """
    whole_inf = dict()
    leak_pipe_self = wn.get_link(leak_pipe)
    pipe_diameter = leak_pipe_self.diameter
    pipe_length = leak_pipe_self.length
    pipe_roughness = leak_pipe_self.roughness
    pipe_minor_loss = leak_pipe_self.minor_loss
    pipe_start_node = leak_pipe_self.start_node_name
    pipe_end_node = leak_pipe_self.end_node_name
    # Remove the original pipe; simple_recover_wn() re-adds it later.
    wn.remove_link(leak_pipe)
    add_pipe1 = leak_pipe + 'A'
    add_pipe2 = leak_pipe + 'B'
    add_node = leak_pipe + '_'
    start_n = wn.get_node(pipe_start_node)
    end_n = wn.get_node(pipe_end_node)
    # A reservoir end has no usable junction elevation, so borrow the other end's.
    if start_n.node_type == 'Reservoir':
        end_n_elevation = end_n.elevation
        start_n_elevation = end_n_elevation
    elif end_n.node_type == 'Reservoir':
        start_n_elevation = start_n.elevation
        end_n_elevation = start_n_elevation
    else:
        end_n_elevation = end_n.elevation
        start_n_elevation = start_n.elevation
    elevation_self = (start_n_elevation + end_n_elevation) / 2
    # Midpoint of the pipe. Bug fix: the y coordinate was previously the SUM of the
    # endpoint y values (missing the division by 2).
    coordinates_self = (
        (start_n.coordinates[0] + end_n.coordinates[0]) / 2,
        (start_n.coordinates[1] + end_n.coordinates[1]) / 2)
    wn.add_junction(add_node, base_demand=0, elevation=elevation_self, coordinates=coordinates_self)
    leak_node = wn.get_node(add_node)
    wn.add_pipe(add_pipe1, start_node_name=pipe_start_node, end_node_name=add_node, length=pipe_length / 2,
                diameter=pipe_diameter, roughness=pipe_roughness, minor_loss=pipe_minor_loss)
    wn.add_pipe(add_pipe2, start_node_name=pipe_end_node, end_node_name=add_node, length=pipe_length / 2,
                diameter=pipe_diameter, roughness=pipe_roughness, minor_loss=pipe_minor_loss)
    # Attach the leak as an extra demand on the new junction.
    leak_node.add_demand(base=leak_mag, pattern_name='add_leak')
    whole_inf['leak_node_name'] = add_node
    whole_inf['add_pipe1'] = add_pipe1
    whole_inf['add_pipe2'] = add_pipe2
    whole_inf['leak_pipe'] = leak_pipe
    whole_inf['pipe_start_node'] = pipe_start_node
    whole_inf['pipe_end_node'] = pipe_end_node
    whole_inf['pipe_length'] = pipe_length
    whole_inf['pipe_diameter'] = pipe_diameter
    whole_inf['pipe_roughness'] = pipe_roughness
    # Bug fix: this previously stored pipe_diameter, so simple_recover_wn() rebuilt
    # the pipe with a wrong minor-loss coefficient.
    whole_inf['pipe_minor_loss'] = pipe_minor_loss
    return wn, whole_inf, add_pipe1
def simple_recover_wn(wn, whole_inf):
    """Undo simple_add_leak(): drop the leak junction/pipes and restore the pipe."""
    leak_node = wn.get_node(whole_inf['leak_node_name'])
    # The leak demand was appended last by simple_add_leak(); remove it.
    del leak_node.demand_timeseries_list[-1]
    for half_pipe_key in ('add_pipe1', 'add_pipe2'):
        wn.remove_link(whole_inf[half_pipe_key])
    wn.remove_node(whole_inf['leak_node_name'])
    # Re-create the original pipe from the recorded attributes.
    wn.add_pipe(whole_inf['leak_pipe'],
                start_node_name=whole_inf['pipe_start_node'],
                end_node_name=whole_inf['pipe_end_node'],
                length=whole_inf['pipe_length'],
                diameter=whole_inf['pipe_diameter'],
                roughness=whole_inf['pipe_roughness'],
                minor_loss=whole_inf['pipe_minor_loss'])
    return wn
# 模拟管道漏损(而非节点漏损)
def leak_simulation_pipe_dd_multi_pf(wn, leak_mag, leak_pipe, sensor_name, sensor_f_name):
    """
    Simulate a leak on a pipe (demand-driven) and return sensor pressures/flows.

    The pipe is split in half around a temporary junction carrying the leak demand;
    after the simulation the network is restored to its original topology.

    :param wn: wntr WaterNetworkModel (temporarily modified, then restored)
    :param leak_mag: leak demand magnitude
    :param leak_pipe: name of the leaking pipe
    :param sensor_name: pressure-sensor node names
    :param sensor_f_name: flow-sensor link names
    :return: (wn, pressure_output, flow_output)
    """
    wn.options.hydraulic.demand_model = 'DD'
    leak_pipe_self = wn.get_link(leak_pipe)
    pipe_diameter = leak_pipe_self.diameter
    pipe_length = leak_pipe_self.length
    pipe_roughness = leak_pipe_self.roughness
    pipe_minor_loss = leak_pipe_self.minor_loss
    pipe_start_node = leak_pipe_self.start_node_name
    pipe_end_node = leak_pipe_self.end_node_name
    wn.remove_link(leak_pipe)
    # Names of the two half pipes and the inserted leak junction.
    add_pipe1 = leak_pipe + 'A'
    add_pipe2 = leak_pipe + 'B'
    add_node = leak_pipe + '_'
    start_n = wn.get_node(pipe_start_node)
    end_n = wn.get_node(pipe_end_node)
    # A reservoir end has no usable junction elevation, so borrow the other end's.
    if start_n.node_type == 'Reservoir':
        end_n_elevation = end_n.elevation
        start_n_elevation = end_n_elevation
    elif end_n.node_type == 'Reservoir':
        start_n_elevation = start_n.elevation
        end_n_elevation = start_n_elevation
    else:
        end_n_elevation = end_n.elevation
        start_n_elevation = start_n.elevation
    elevation_self = (start_n_elevation + end_n_elevation) / 2
    # Midpoint of the pipe. Bug fix: the y coordinate was previously the SUM of the
    # endpoint y values (missing the division by 2).
    coordinates_self = (
        (start_n.coordinates[0] + end_n.coordinates[0]) / 2,
        (start_n.coordinates[1] + end_n.coordinates[1]) / 2)
    wn.add_junction(add_node, base_demand=0, elevation=elevation_self, coordinates=coordinates_self)
    leak_node = wn.get_node(add_node)
    wn.add_pipe(add_pipe1, start_node_name=pipe_start_node, end_node_name=add_node, length=pipe_length / 2,
                diameter=pipe_diameter, roughness=pipe_roughness, minor_loss=pipe_minor_loss)
    wn.add_pipe(add_pipe2, start_node_name=pipe_end_node, end_node_name=add_node, length=pipe_length / 2,
                diameter=pipe_diameter, roughness=pipe_roughness, minor_loss=pipe_minor_loss)
    # simulation
    leak_node.add_demand(base=leak_mag, pattern_name='add_leak')
    sim = wntr.sim.EpanetSimulator(wn)
    results = sim.run_sim()
    del leak_node.demand_timeseries_list[-1]
    pressure_output = results.node['pressure'][sensor_name]
    if leak_pipe in sensor_f_name:
        # The monitored pipe was split: read its first half, relabel it back.
        f_sensor_name = [add_pipe1 if i == leak_pipe else i for i in sensor_f_name]
        flow_output = results.link['flowrate'][f_sensor_name]
        flow_output.columns = sensor_f_name
    else:
        flow_output = results.link['flowrate'][sensor_f_name]
    # Restore the original network topology.
    wn.remove_link(add_pipe1)
    wn.remove_link(add_pipe2)
    wn.remove_node(add_node)
    wn.add_pipe(leak_pipe, start_node_name=pipe_start_node, end_node_name=pipe_end_node, length=pipe_length,
                diameter=pipe_diameter, roughness=pipe_roughness, minor_loss=pipe_minor_loss)
    return wn, pressure_output, flow_output
# def 2 normal simulation 正常工况下的水力模拟
def normal_simulation_pf(wn: wntr.network.WaterNetworkModel, pressure_SCADA_IDs_list: list[str], flow_SCADA_IDs_list: list[str]):
    """
    Run a normal (no-leak) hydraulic simulation for the first timestep.

    :param wn: wntr network model
    :param pressure_SCADA_IDs_list: node IDs of the pressure sensors
    :param flow_SCADA_IDs_list: link IDs of the flow sensors
    :return: (pressure, flow, basic_p, top_sensor, sum_demand) -- sensor pressures,
        sensor flows, the full pressure distribution, the sensor with the lowest
        pressure, and the total positive demand
    """
    sim_results = wntr.sim.EpanetSimulator(wn).run_sim()
    pressure = sim_results.node['pressure'][pressure_SCADA_IDs_list].iloc[0]
    demand = sim_results.node['demand'].iloc[0]
    sum_demand: float = cal_sum_demand(demand)
    flow = sim_results.link['flowrate'][flow_SCADA_IDs_list].iloc[0]
    # The "top" sensor is the one reading the lowest pressure.
    top_sensor: str = pressure.idxmin()
    basic_p = sim_results.node['pressure'].iloc[0]
    return pressure, flow, basic_p, top_sensor, sum_demand
def normal_simulation_multi_pf(wn: wntr.network.WaterNetworkModel, pressure_SCADA_IDs_list: list[str], flow_SCADA_IDs_list: list[str]):
    """
    Run a normal (no-leak) multi-timestep hydraulic simulation.

    :param wn: wntr network model
    :param pressure_SCADA_IDs_list: node IDs of the pressure sensors
    :param flow_SCADA_IDs_list: link IDs of the flow sensors
    :return: (pressure, flow, basic_p, top_sensor, sum_demand) -- sum_demand maps
        each timestep label (as str) to the total positive demand at that step
    """
    sim = wntr.sim.EpanetSimulator(wn)
    results = sim.run_sim()
    pressure = results.node['pressure'][pressure_SCADA_IDs_list]
    demand = results.node['demand']
    flow = results.link['flowrate'][flow_SCADA_IDs_list]
    # Total positive demand per timestep, keyed by the timestep label.
    sum_demand = pd.Series(dtype=object)
    for timestep, demand_row in demand.iterrows():
        sum_demand[str(timestep)] = cal_sum_demand(demand_row)
    # Idiom fix: isinstance() instead of comparing type() to pd.core.series.Series.
    if isinstance(pressure, pd.Series):
        top_sensor = pressure.idxmin()
    else:
        # Multi-timestep frame: pick the sensor with the lowest mean pressure.
        top_sensor = pressure.mean().idxmin()
    basic_p = results.node['pressure']
    return pressure, flow, basic_p, top_sensor, sum_demand
# leak_pipe模拟漏损管道
# 获取模拟漏损后的压力(流量)监测点处压力(流量)
def simple_simulation_pf(wn, sensor_name, sensor_f_name, leak_pipe, add_pipe1):
    """
    Simulate the (possibly leak-modified) network and return sensor readings.

    :param wn: wntr network model
    :param sensor_name: pressure-sensor node names
    :param sensor_f_name: flow-sensor link names
    :param leak_pipe: name of the pipe replaced by simple_add_leak() ('' if none)
    :param add_pipe1: name of the first half pipe that replaced leak_pipe
    :return: (pressure_all, flow_all) time series at the sensors
    """
    results = wntr.sim.EpanetSimulator(wn).run_sim()
    pressure_all = results.node['pressure'][sensor_name]
    if len(leak_pipe) > 0 and leak_pipe in sensor_f_name:
        # The monitored pipe was split by the leak: read its first half, then
        # relabel the column back to the original pipe name.
        renamed = [add_pipe1 if p == leak_pipe else p for p in sensor_f_name]
        flow_all = results.link['flowrate'][renamed]
        flow_all.columns = sensor_f_name
    else:
        flow_all = results.link['flowrate'][sensor_f_name]
    return pressure_all, flow_all
def cal_sum_demand(demand):
    """
    Sum the positive entries of a demand series.

    Negative demands (inflows at sources/tanks) are ignored; NaN compares False
    against 0 and is therefore skipped, matching the original loop.

    :param demand: iterable / pandas Series of nodal demands
    :return: total positive demand (0 when nothing is positive)
    """
    return sum(v for v in demand if v > 0)
def pick_center_pipe(node_x, node_y, candidate_pipe, pipe_start_node, pipe_end_node):
    """
    Pick the most central pipe of a candidate set.

    Each pipe is represented by its midpoint; the pipe whose midpoint has the
    smallest L1 distance to the mean of all midpoints is returned.

    :param node_x: Series of node x coordinates indexed by node name
    :param node_y: Series of node y coordinates indexed by node name
    :param candidate_pipe: iterable of candidate pipe names (must be non-empty)
    :param pipe_start_node: Series mapping pipe name -> start node name
    :param pipe_end_node: Series mapping pipe name -> end node name
    :return: name of the most central candidate pipe
    """
    data_set_t = pd.DataFrame(dtype=object)
    # Bug fix: the midpoint previously averaged start with start (x) and summed
    # end with end (y), degenerating to the start-node x and twice the end-node y
    # instead of the true midpoint.
    data_set_t['x'] = (node_x[pipe_start_node[candidate_pipe]].values +
                       node_x[pipe_end_node[candidate_pipe]].values) / 2
    data_set_t['y'] = (node_y[pipe_start_node[candidate_pipe]].values +
                       node_y[pipe_end_node[candidate_pipe]].values) / 2
    data_set_t.index = list(candidate_pipe)
    # L1 distance of every midpoint to the centroid of all midpoints.
    mean_x = data_set_t['x'].mean()
    mean_y = data_set_t['y'].mean()
    data_set_t['d'] = abs(data_set_t['x'] - mean_x) + abs(data_set_t['y'] - mean_y)
    return data_set_t['d'].idxmin()
def find_new_center_pipe(node_x, node_y, candidate_pipe, pipe_start_node, pipe_end_node, record_center):
    """
    Pick a center pipe while avoiding centers already used by other groups.

    Falls back to the full candidate set when every candidate is already taken.
    """
    remaining = list(set(candidate_pipe) - set(record_center))
    if not remaining:
        remaining = candidate_pipe
    return pick_center_pipe(node_x, node_y, remaining, pipe_start_node, pipe_end_node)
# 根据计算的爆管相关节点求出相关的爆管管段
def cal_area_node_linked_pipe(nodeset, node_pipe_dic):
    """
    Collect the pipes incident to every node in `nodeset`.

    :param nodeset: iterable of node names
    :param node_pipe_dic: dict mapping node name -> list of incident pipe names
    :return: concatenated pipe list, in node order; a pipe may appear twice when
             both of its end nodes are in `nodeset`
    """
    return [pipe for node in nodeset for pipe in node_pipe_dic[node]]
# kmeans 聚类后的拓扑优化
# 构建整个图
def construct_graph(wn):
    """
    Build an undirected, length-weighted NetworkX graph of the network.

    :param wn: wntr WaterNetworkModel
    :return: undirected graph whose edge weights are the pipe lengths
    """
    length = wn.query_link_attribute('length')
    # Bug fix: get_graph() is an instance method, so the stray `wn` positional
    # argument was being passed as node_weight. Pass only the link weights.
    G = wn.get_graph(link_weight=length)
    # Convert the directed multigraph to an undirected graph.
    return G.to_undirected()
# cal metis grouping
def metis_grouping_pipe_weight(G0, wn, all_node_iter, candidate_pipe_input, group_num, node_x, node_y,
                               pipe_start_node_all,
                               pipe_end_node_all, node_pipe_dic, all_node_series, couple_node_length):
    """
    Partition the candidate area into `group_num` groups with METIS (edge weights
    taken from `couple_node_length`) and choose a center pipe for every group.

    :param G0: undirected graph of the whole network
    :param wn: wntr model -- not referenced in this body; kept for interface compatibility
    :param all_node_iter: nodes of the area to partition
    :param candidate_pipe_input: candidate pipes to distribute over the groups
    :param group_num: number of METIS partitions requested
    :param node_x: node x-coordinate Series
    :param node_y: node y-coordinate Series
    :param pipe_start_node_all: pipe -> start node map
    :param pipe_end_node_all: pipe -> end node map
    :param node_pipe_dic: node -> list of incident pipes
    :param all_node_series: Series used to give the nodes a stable sort order
    :param couple_node_length: dict 'n1,n2' -> edge weight (pipe length) between nodes
    :return: (new_center, new_group, new_all_node) -- per kept group: its center
             pipe, its candidate pipes, and its node set
    """
    all_node_iter_series_new = all_node_series[all_node_iter]
    all_node_iter_series_new = all_node_iter_series_new.sort_values(ascending=True)
    all_node_iter_new = list(all_node_iter_series_new.index)
    G1 = G0.subgraph(all_node_iter_new)
    delimiter = ' '
    adjacency_list = []
    node_dict = {}
    c_new = 0
    # Number the nodes consecutively: METIS works on integer vertex IDs.
    for each_node in all_node_iter_new:
        node_dict[each_node] = c_new
        c_new = c_new + 1
    correspond_dic = {}
    count_node = 0
    w = []
    # Build one adjacency row per node plus a parallel edge-weight list.
    for line in generate_adjlist_with_all_edges(G1, delimiter):
        temp_node_name = line.split(sep=delimiter)
        w_temp = []
        for i in range(len(temp_node_name) - 1):
            # Edge weight keyed as 'source,neighbour'.
            temp_name_1 = temp_node_name[0] + ',' + temp_node_name[i + 1]
            w_temp.append(couple_node_length[temp_name_1])
        w.append(w_temp)
        n_t = []
        for each_node in temp_node_name:
            n_t.append(node_dict[each_node])
        correspond_dic[n_t[0]] = count_node
        count_node = count_node + 1
        # del n_t[0]
        adjacency_list.append(n_t)
    # Reorder rows so row i describes vertex i, then strip the leading self-ID.
    adjacency_list_new = [[] * 1 for i in range(len(adjacency_list))]
    w_new = [[] * 1 for i in range(len(adjacency_list))]
    for i in range(len(adjacency_list)):
        adjacency_list_new[int(adjacency_list[i][0])] = adjacency_list[i]
        w_new[int(adjacency_list[i][0])] = w[i]
    for i in range(len(adjacency_list)):
        del adjacency_list_new[i][0]
    # Flatten to CSR-style arrays (xadj / adjncy / eweights) as pymetis expects.
    xadj = [0]
    w_f = []
    final_adjacency_list = []
    for i in range(len(adjacency_list_new)):
        final_adjacency_list = final_adjacency_list + adjacency_list_new[i]
        xadj.append(len(final_adjacency_list))
        w_f = w_f + w_new[i]
    # (edgecuts, parts) = pymetis.part_graph(nparts=group_num, adjacency=adjacency_list_new)
    (edgecuts, parts) = pymetis.part_graph(nparts=group_num, adjncy=final_adjacency_list, xadj=xadj, eweights=w_f)
    # (edgecuts, parts) = pymetis.part_graph(nparts=group_num, adjacency=adjacency_list_new)
    # Translate partition IDs back into node groups.
    candidate_group_list = [[] * 1 for i in range(group_num)]
    for i in range(len(all_node_iter_new)):
        candidate_group_list[parts[i]].append(all_node_iter_new[i])
    '''parts_new = np.zeros(len(candidate_node_input), dtype=int)
    for i in range(len(candidate_group_list)):
    temp_group = candidate_group_list[i]
    for each_node in temp_group:
    parts_new[node_dict[each_node]] = i
    parts_new = list(parts_new)'''
    new_center = []
    new_group = []
    new_all_node = []
    candidate_pipe_set = set(candidate_pipe_input)
    all_grouped_pipe = []
    for i in range(group_num):
        # Build the subgraph of this partition.
        G_sub = G0.subgraph(candidate_group_list[i])
        # Compute its connected components.
        sub_graphs = networkx.connected_components(G_sub)
        if networkx.number_connected_components(G_sub) == 1:
            # Intersect the group's incident pipes with the candidate set.
            nodeset = G_sub.nodes()
            pipeset_set = set(cal_area_node_linked_pipe(nodeset, node_pipe_dic))
            candidate_pipe = list(pipeset_set.intersection(candidate_pipe_set))
            # Keep the group only if it still contains candidate pipes.
            if len(candidate_pipe) > 0:
                # Keep it and compute its center pipe.
                center_t = pick_center_pipe(node_x, node_y, candidate_pipe, pipe_start_node_all, pipe_end_node_all)
                # Record the group.
                new_center.append(center_t)
                new_group.append(candidate_pipe)
                new_all_node.append(nodeset)
                all_grouped_pipe = all_grouped_pipe + candidate_pipe
        else:
            # Disconnected partition: handle each connected component separately.
            for c in sub_graphs:
                G_temp = G0.subgraph(c)
                nodeset = G_temp.nodes()
                pipeset = cal_area_node_linked_pipe(nodeset, node_pipe_dic)
                pipeset_set = set(pipeset)
                # Intersect with the candidate set.
                candidate_pipe = list(pipeset_set.intersection(candidate_pipe_set))
                # print(len(candidate_node))
                # Keep the component only if it still contains candidate pipes.
                if len(candidate_pipe) > 0:
                    # Keep it and compute its center pipe.
                    center_t = pick_center_pipe(node_x, node_y, candidate_pipe, pipe_start_node_all, pipe_end_node_all)
                    # Record the group.
                    new_center.append(center_t)
                    new_group.append(candidate_pipe)
                    new_all_node.append(nodeset)
                    all_grouped_pipe = all_grouped_pipe + candidate_pipe
    # Ensure groups of >= 3 pipes do not reuse a center already taken by a small group.
    record_center = []
    c_g = 0
    for each_group in new_group:
        if len(each_group) < 3:
            record_center.append(new_center[c_g])
        c_g += 1
    c_g = 0
    for each_group in new_group:
        if len(each_group) >= 3:
            if new_center[c_g] in record_center:
                new_center[c_g] = find_new_center_pipe(node_x, node_y, each_group, pipe_start_node_all,
                                                       pipe_end_node_all, record_center)
            record_center.append(new_center[c_g])
        c_g += 1
    # NOTE(review): plotting inside this computation routine blocks on plt.show() --
    # confirm this is intended outside of debugging sessions.
    visualize_metis_partition(
        G0, new_center, new_group,
        node_x, node_y,
        pipe_start_node_all, pipe_end_node_all
    )
    return new_center, new_group, new_all_node
def visualize_metis_partition(
        G, center_pipes, pipe_groups,
        node_x, node_y,
        pipe_start_node_all, pipe_end_node_all
):
    """
    Visualise the METIS partition result (single-figure mode).

    :param G: original network graph (nx.Graph)
    :param center_pipes: list of center pipes
    :param pipe_groups: grouped pipe lists (list of lists)
    :param node_x: node X coordinates (dict)
    :param node_y: node Y coordinates (dict)
    :param pipe_start_node_all: pipe -> start node map (dict)
    :param pipe_end_node_all: pipe -> end node map (dict)
    """
    plt.figure(figsize=(9, 10))
    # Colour map automatically spread over the number of groups.
    colors = plt.cm.tab20(np.linspace(0, 1, len(pipe_groups)))
    # --- Background network (light gray, semi-transparent) ---
    for edge in G.edges():
        start_node, end_node = edge
        plt.plot(
            [node_x[start_node], node_x[end_node]],
            [node_y[start_node], node_y[end_node]],
            color='lightgray',
            linewidth=0.5,
            alpha=0.3,
            zorder=1  # keep the background at the bottom layer
        )
    # --- Coloured pipes of each partition ---
    legend_handles = []  # handles used for the legend
    for i, (group, center) in enumerate(zip(pipe_groups, center_pipes)):
        color = colors[i % len(colors)]  # cycle through the colours
        # Draw the pipes of this group.
        for pipe in group:
            start = pipe_start_node_all[pipe]
            end = pipe_end_node_all[pipe]
            line = plt.plot(
                [node_x[start], node_x[end]],
                [node_y[start], node_y[end]],
                color=color,
                linewidth=2.5,
                alpha=0.8,
                zorder=2
            )
            # Add a legend handle only for the first pipe of each group.
            if pipe == group[0]:
                legend_handles.append(line[0])
        # Highlight the center pipe (red dashed line).
        if center in pipe_start_node_all and center in pipe_end_node_all:
            start = pipe_start_node_all[center]
            end = pipe_end_node_all[center]
            plt.plot(
                [node_x[start], node_x[end]],
                [node_y[start], node_y[end]],
                color='red',
                linewidth=4,
                linestyle='--',
                dash_capstyle='round',
                zorder=3  # keep the center pipe on the top layer
            )
    # --- Legend and annotations ---
    # Group legend.
    group_labels = [f'Group {i + 1}' for i in range(len(pipe_groups))]
    plt.legend(
        legend_handles,
        group_labels,
        loc='upper right',
        title="Partitions",
        fontsize=8,
        title_fontsize=10
    )
    # Center-pipe labels (optional).
    for i, center in enumerate(center_pipes):
        if center in pipe_start_node_all:
            x = (node_x[pipe_start_node_all[center]] + node_x[pipe_end_node_all[center]]) / 2
            y = (node_y[pipe_start_node_all[center]] + node_y[pipe_end_node_all[center]]) / 2
            plt.text(
                x, y,
                f'C{i + 1}',
                color='red',
                fontsize=10,
                ha='center',
                va='center',
                bbox=dict(facecolor='white', alpha=0.8, edgecolor='none')
            )
    # --- Figure styling ---
    plt.title("Water Network Partitioning Overview", fontsize=14, pad=20)
    plt.xlabel("X Coordinate", fontsize=10)
    plt.ylabel("Y Coordinate", fontsize=10)
    plt.grid(True, alpha=0.2, linestyle=':')
    plt.tight_layout()
    # Show the figure (blocking in interactive backends).
    plt.show()
#
def generate_adjlist_with_all_edges(G, delimiter):
    """
    Yield one adjacency line per node: the node name followed by ALL of its
    neighbours, joined by `delimiter` (unlike nx.generate_adjlist, no edge is
    omitted for having been listed before).
    """
    for node, neighbours in G.adjacency():
        fields = [str(node)] + [str(nbr) for nbr in neighbours]
        yield delimiter.join(fields)
#
# Similarity matching module
def cal_similarity_simple_return_dd(similarity_mode, monitor_p, predict_p, normal_p, leak_p,
                                    monitor_p_all, predict_p_all, normal_p_all, leak_p_all,
                                    important_sensor, mean_dpressure, dpressure_std, dpressure_std_all, if_gy=0,
                                    cos_or_flow=1):
    """
    Compute similarity scores between the simulated leak signature and the
    measured one, in the mode selected by `similarity_mode`.

    :param similarity_mode: 'COS', 'DIS', 'CAD_new', 'CAD_new_gy', 'CDF', 'CAF' or 'OF'
    :param monitor_p: measured sensor pressures
    :param predict_p: predicted normal sensor pressures
    :param normal_p: simulated normal sensor pressures
    :param leak_p: simulated leak sensor pressures
    :param monitor_p_all / predict_p_all / normal_p_all / leak_p_all: the same
        quantities over all sensors (only leak_p_all / monitor_p_all /
        dpressure_std_all are read here, in the 'CDF'/'OF' distance part)
    :param important_sensor: sensors used for the distance (DIS) term
    :param mean_dpressure: not used in this body; kept for interface compatibility
    :param dpressure_std / dpressure_std_all: per-sensor std, used when if_gy == 1
    :param if_gy: 1 -> normalise pressure drops by their std
    :param cos_or_flow: for mode 'CAF': 1 -> cosine part, 2 -> distance part
    :return: (similarity_cos, similarity_dis, none_flag)

    NOTE(review): if `similarity_mode` is not one of the handled values, or the
    'COS' try-block raises (it only prints diagnostics), similarity_cos /
    similarity_dis / none_flag stay unassigned and the final return raises
    UnboundLocalError -- confirm callers only pass supported modes.
    """
    # cos_or_flow is used for 'CAF' mode.
    dpressure_s = normal_p - leak_p    # simulated pressure drop
    dpressure = predict_p - monitor_p  # measured pressure drop
    # Per-sensor residual between simulated-leak and measured pressure,
    # optionally normalised by the drop std.
    act_dpressure = pd.Series(dtype=object)
    for i in range(len(leak_p.index)):
        if dpressure_std.iloc[i] > -200:  # 0.001:
            if if_gy == 1:
                act_dpressure[leak_p.index[i]] = (leak_p.iloc[i] - monitor_p.iloc[i]) / dpressure_std.iloc[i]
            else:
                act_dpressure[leak_p.index[i]] = leak_p.iloc[i] - monitor_p.iloc[i]
    if similarity_mode == 'COS' or (similarity_mode == 'CAF' and cos_or_flow == 1):
        '''if leak_p.min()<0:
        none_flag = 1
        similarity_cos = 0
        similarity_dis = 0
        else:'''
        none_flag = 0
        sensor_for_cos = list(set(dpressure_s.index).intersection(set(act_dpressure.index)))
        '''if len(dpressure_s) ==0 or len(dpressure) ==0:
        jj=9
        else:'''
        try:
            # Cosine similarity between simulated and measured drop vectors.
            s1 = np.dot(np.transpose(dpressure_s.loc[sensor_for_cos]), dpressure.loc[sensor_for_cos])
            s2 = np.linalg.norm(dpressure_s.loc[sensor_for_cos]) * np.linalg.norm(dpressure.loc[sensor_for_cos])
            if s2 == 0:
                s2 = s2 + 0.0001  # avoid division by zero
            similarity_cos = s1 / s2
            similarity_dis = 0
        except Exception as e:
            # NOTE(review): diagnostics only -- similarity_cos stays unbound here.
            print(dpressure_s)
            print(sensor_for_cos)
            print(act_dpressure)
            print(dpressure_std)
            print(dpressure)
    elif similarity_mode == 'DIS' or (similarity_mode == 'CAF' and cos_or_flow == 2):
        '''if leak_p.min()<0:
        none_flag = 1
        else:'''
        none_flag = 0
        # Euclidean distance on the important sensors only.
        important_sensor = list(set(important_sensor).intersection(set(act_dpressure.index)))
        part_dpressure = dpressure_s[important_sensor] - dpressure[important_sensor]
        similarity_pre_DIS = np.linalg.norm(part_dpressure)
        # similarity_pre_DIS_later = 1 / (1 + similarity_pre_DIS)
        similarity_dis = similarity_pre_DIS
        similarity_cos = 0
    elif similarity_mode == 'CAD_new':
        act_dpressure = leak_p - monitor_p
        '''if leak_p.min() < 0:
        none_flag = 1
        similarity_cos = 0
        similarity_dis =0
        else:'''
        none_flag = 0
        # cos
        s1 = np.dot(np.transpose(dpressure_s), dpressure)
        s2 = np.linalg.norm(dpressure_s) * np.linalg.norm(dpressure)
        if s2 == 0:
            s2 = s2 + 0.0001
        similarity_cos = s1 / s2
        # DIS
        part_dpressure = act_dpressure.loc[important_sensor]
        similarity_pre_DIS = np.linalg.norm(part_dpressure)
        similarity_pre_DIS_later = 1 / (1 + similarity_pre_DIS)
        similarity_dis = similarity_pre_DIS
    elif similarity_mode == 'CAD_new_gy' or similarity_mode == 'CDF':
        # cos
        sensor_for_cos = list(set(dpressure_s.index).intersection(set(act_dpressure.index)))
        if len(sensor_for_cos) == 0 and len(dpressure_s) == 0:
            # NOTE(review): none_flag is not assigned on this path.
            similarity_cos = 0
        elif len(sensor_for_cos) == 0 and len(dpressure_s) > 0:
            # No overlap: fall back to every simulated-drop sensor.
            sensor_for_cos = list(dpressure_s.index)
            none_flag = 0
            s1 = np.dot(np.transpose(dpressure_s.loc[sensor_for_cos]), dpressure.loc[sensor_for_cos])
            s2 = np.linalg.norm(dpressure_s.loc[sensor_for_cos]) * np.linalg.norm(dpressure.loc[sensor_for_cos])
            if s2 == 0:
                s2 = s2 + 0.0001
            similarity_cos = s1 / s2
        else:
            none_flag = 0
            s1 = np.dot(np.transpose(dpressure_s.loc[sensor_for_cos]), dpressure.loc[sensor_for_cos])
            s2 = np.linalg.norm(dpressure_s.loc[sensor_for_cos]) * np.linalg.norm(dpressure.loc[sensor_for_cos])
            if s2 == 0:
                s2 = s2 + 0.0001
            similarity_cos = s1 / s2
        # DIS
        important_sensor_new = list(set(important_sensor).intersection(set(act_dpressure.index)))
        if len(important_sensor_new) == 0:
            important_sensor_new = important_sensor
        # Recompute the residuals over ALL sensors.
        act_dpressure = pd.Series(dtype=object)
        for i in range(len(leak_p_all.index)):
            # if dpressure_std.iloc [i] > -200: # 0.001:
            if if_gy == 1:
                act_dpressure[leak_p_all.index[i]] = (leak_p_all.iloc[i] - monitor_p_all.iloc[i]) / \
                                                     dpressure_std_all.iloc[i]
            else:
                act_dpressure[leak_p_all.index[i]] = leak_p_all.iloc[i] - monitor_p_all.iloc[i]
        # part_dpressure = act_dpressure.loc[important_sensor_new]
        part_dpressure = (dpressure.loc[important_sensor_new] - dpressure_s.loc[important_sensor_new])
        similarity_pre_DIS = np.linalg.norm(part_dpressure)  ## chang test
        # part_dpressure = dpressure_s.loc[important_sensor]-dpressure.loc[important_sensor]
        # similarity_pre_DIS = np.linalg.norm(part_dpressure)
        # similarity_pre_DIS_later = 1 / (1 + similarity_pre_DIS)
        similarity_dis = similarity_pre_DIS
    elif similarity_mode == 'OF':
        # cos
        similarity_cos = 0
        none_flag = 0
        # DIS (same fallback computation as the 'CDF' branch above)
        important_sensor_new = list(set(important_sensor).intersection(set(act_dpressure.index)))
        if len(important_sensor_new) == 0:
            important_sensor_new = important_sensor
        act_dpressure = pd.Series(dtype=object)
        for i in range(len(leak_p_all.index)):
            # if dpressure_std.iloc [i] > -200: # 0.001:
            if if_gy == 1:
                act_dpressure[leak_p_all.index[i]] = (leak_p_all.iloc[i] - monitor_p_all.iloc[i]) / \
                                                     dpressure_std_all.iloc[i]
            else:
                act_dpressure[leak_p_all.index[i]] = leak_p_all.iloc[i] - monitor_p_all.iloc[i]
        # part_dpressure = act_dpressure.loc[important_sensor_new]
        part_dpressure = (dpressure.loc[important_sensor_new] - dpressure_s.loc[important_sensor_new])
        similarity_pre_DIS = np.linalg.norm(part_dpressure)  ## chang test
        # part_dpressure = dpressure_s.loc[important_sensor]-dpressure.loc[important_sensor]
        # similarity_pre_DIS = np.linalg.norm(part_dpressure)
        # similarity_pre_DIS_later = 1 / (1 + similarity_pre_DIS)
        similarity_dis = similarity_pre_DIS
    return similarity_cos, similarity_dis, none_flag
def adjust(similarity_cos, similarity_dis, record_success_candidate, record_success_no_candidate):
    """
    Fill in pessimistic scores for candidates whose simulation did not succeed:
    slightly below the successful minimum for cosine, slightly above the
    successful maximum for distance. Mutates and returns the input Series.
    """
    for failed_candidate in record_success_no_candidate:
        similarity_cos[failed_candidate] = similarity_cos[record_success_candidate].min() * 0.9
        similarity_dis[failed_candidate] = similarity_dis[record_success_candidate].max() * 1.1
    return similarity_cos, similarity_dis
def cal_sq_all_multi(similarity_cos, similarity_dis, similarity_f, candidate_pipe, timestep_list_spc, if_flow,
                     if_only_cos, if_only_flow,
                     cos_h_input, dis_h_input, dis_f_h_input, if_compalsive, cos_sensor_num, flow_sensor_num):
    """
    Combine cosine / distance / flow similarity scores into one weighted score
    matrix, using entropy-derived weights (or forced weights).

    :param similarity_cos / similarity_dis / similarity_f: per (timestep, pipe) scores
    :param candidate_pipe: candidate pipe names (columns of the output frame)
    :param timestep_list_spc: timestep labels (index of the output frame)
    :param if_flow: 1 -> include the flow-similarity term
    :param if_only_cos: 0 -> cos+dis(+flow); 1 -> cos(+flow) only; otherwise dis only
    :param if_only_flow: 1 -> use the flow similarity exclusively
    :param cos_h_input / dis_h_input / dis_f_h_input: forced weights, used when
        if_compalsive == 1 and returned unchanged on the pass-through branches
    :param if_compalsive: 1 -> use the forced weights instead of entropy weights
    :param cos_sensor_num / flow_sensor_num: sensor counts used to scale weights
    :return: (output_similarity_pd, sq_cos, sq_dis, sq_f)
    """
    if if_only_flow == 1:
        # Flow-only scoring: normalise the flow similarity and give it weight 1.
        similarity_f, h_f = cal_sq_single_array(similarity_f.values.reshape((-1, 1)), if_direct=2)
        sq_cos = 0
        sq_dis = 0
        sq_f = 1
        similarity_all = similarity_f * sq_f
        output_similarity = similarity_all.reshape((-1, len(candidate_pipe)))
        output_similarity_pd = pd.DataFrame(output_similarity, index=timestep_list_spc, columns=candidate_pipe)
    else:
        if if_only_cos == 0:
            if if_flow == 1:
                # standerdize
                similarity_cos, h_cos = cal_sq_single_array(similarity_cos.values.reshape((-1, 1)), if_direct=1)
                similarity_dis, h_dis = cal_sq_single_array(similarity_dis.values.reshape((-1, 1)), if_direct=2)
                similarity_f, h_f = cal_sq_single_array(similarity_f.values.reshape((-1, 1)), if_direct=2)
                if if_compalsive == 1:
                    sq_cos = cos_h_input
                    sq_dis = dis_h_input
                    sq_f = dis_f_h_input
                else:
                    '''sq_cos = h_cos/(h_cos +h_dis +h_f )
                    sq_dis = h_dis/(h_cos +h_dis +h_f )
                    sq_f = h_f/(h_cos +h_dis +h_f )'''
                    sq_cos, sq_dis, sq_f = add_weight_for_SQ(h_cos, h_dis, h_f, cos_sensor_num, flow_sensor_num)
                    '''if cos_sensor_num == 2 and sq_cos>0.2:
                    sq_cos = 0.2
                    sq_dis = 0.8*h_dis / (h_dis + h_f)
                    sq_f = 0.8*h_f / (h_dis + h_f)
                    if cos_sensor_num == 1 and sq_dis > 0.3:
                    sq_cos = 0.1
                    sq_dis = 0.3
                    sq_f = 0.6'''
                sq_cos, sq_dis, sq_f = adjust_ratio('CDF', sq_cos, sq_dis, sq_f)
                # A single pressure sensor cannot support a cosine similarity.
                if cos_sensor_num <= 1:
                    sq_cos = 0
                # similarity
                similarity_all = similarity_cos * sq_cos + similarity_dis * sq_dis + similarity_f * sq_f
                output_similarity = similarity_all.reshape((-1, len(candidate_pipe)))
                output_similarity_pd = pd.DataFrame(output_similarity, index=timestep_list_spc, columns=candidate_pipe)
            else:
                # standerdize
                similarity_cos, h_cos = cal_sq_single_array(similarity_cos.values.reshape((-1, 1)), if_direct=1)
                similarity_dis, h_dis = cal_sq_single_array(similarity_dis.values.reshape((-1, 1)), if_direct=2)
                if if_compalsive == 1:
                    sq_cos = cos_h_input
                    sq_dis = dis_h_input
                else:
                    sq_cos = h_cos / (h_cos + h_dis)
                    sq_dis = h_dis / (h_cos + h_dis)
                    # With only two pressure sensors, cap the cosine weight.
                    if cos_sensor_num == 2 and sq_cos > 0.5:
                        sq_cos = 0.5
                        sq_dis = 0.5
                sq_cos, sq_dis, sq_f = adjust_ratio('CAD_new_gy', sq_cos, sq_dis, 0)
                sq_f = 0
                # similarity
                similarity_all = similarity_cos * sq_cos + similarity_dis * sq_dis
                output_similarity = similarity_all.reshape((-1, len(candidate_pipe)))
                output_similarity_pd = pd.DataFrame(output_similarity, index=timestep_list_spc, columns=candidate_pipe)
        elif if_only_cos == 1:
            if if_flow == 1:
                # standerdize
                similarity_cos, h_cos = cal_sq_single_array(similarity_cos.values.reshape((-1, 1)), if_direct=1)
                similarity_f, h_f = cal_sq_single_array(similarity_f.values.reshape((-1, 1)), if_direct=1)  # if_direct=2
                if if_compalsive == 1:
                    sq_cos = cos_h_input
                    sq_f = dis_f_h_input
                else:
                    sq_cos = h_cos / (h_cos + h_f)
                    sq_f = h_f / (h_cos + h_f)
                sq_cos, sq_dis, sq_f = adjust_ratio('CAF', sq_cos, 0, sq_f)
                sq_dis = 0
                # similarity
                similarity_all = similarity_cos * sq_cos + similarity_f * sq_f
                output_similarity = similarity_all.reshape((-1, len(candidate_pipe)))
                output_similarity_pd = pd.DataFrame(output_similarity, index=timestep_list_spc, columns=candidate_pipe)
            else:
                # Pass-through: no normalisation, the forced weights are returned as-is.
                sq_cos = cos_h_input
                sq_dis = dis_h_input
                sq_f = dis_f_h_input
                output_similarity_pd = similarity_cos
        else:
            # Distance-only pass-through: convert distance to a (0, 1] similarity.
            sq_cos = cos_h_input
            sq_dis = dis_h_input
            sq_f = dis_f_h_input
            output_similarity_pd = 1 / (similarity_dis + 1)
    return output_similarity_pd, sq_cos, sq_dis, sq_f
def add_weight_for_SQ(h_cos, h_dis, h_f, sensor_cos_num, sensor_f_num):
    """Weight the three entropy scores by sensor counts and normalize to ratios.

    Returns (cos_sq, dis_sq, f_sq), summing to 1. The cosine channel is
    suppressed entirely with fewer than two pressure sensors and capped at
    0.2 when exactly two are available.
    """
    weighted_f = h_f * sensor_f_num
    if sensor_cos_num <= 1:
        # A single pressure sensor carries no usable direction information.
        weighted_cos = 0
        weighted_dis = h_dis * sensor_cos_num
    else:
        weighted_cos = h_cos * sensor_cos_num  # / 2
        weighted_dis = h_dis * sensor_cos_num  # / 2
    total = weighted_cos + weighted_dis + weighted_f
    cos_sq = weighted_cos / total
    dis_sq = weighted_dis / total
    f_sq = weighted_f / total
    if sensor_cos_num == 2 and cos_sq > 0.2:
        # Cap the cosine share; split the remaining 0.8 between the others.
        cos_sq = 0.2
        remainder = weighted_dis + weighted_f
        dis_sq = 0.8 * weighted_dis / remainder
        f_sq = 0.8 * weighted_f / remainder
    return cos_sq, dis_sq, f_sq
def cal_sq_single_array(similarity_pre, if_direct):
    """Min-max rescale a column vector into [0.002, 1] and compute its
    entropy-based weight.

    if_direct == 1 keeps the orientation (larger is better); any other value
    flips it. A constant input degenerates to a vector of ones. Returns the
    rescaled array and the entropy weight h.
    """
    lo = similarity_pre.min()
    hi = similarity_pre.max()
    if hi - lo == 0:
        scaled = np.ones(similarity_pre.shape)
    else:
        if if_direct == 1:
            scaled = 0.998 * (similarity_pre - lo) / (hi - lo) + 0.002
        else:
            scaled = 0.998 * (hi - similarity_pre) / (hi - lo) + 0.002
    # Normalize into a probability distribution p_ij.
    prob = scaled / scaled.sum()
    # Per-element Shannon-entropy contribution -p*ln(p).
    neg_plogp = np.zeros((len(scaled), 1))
    for idx in range(len(prob)):
        neg_plogp[idx] = -prob[idx] * math.log(prob[idx], math.e)
    h = 1 - 1 / math.log(len(scaled), math.e) * neg_plogp.sum()
    return scaled, h
def cal_similarity_all_multi_new_sq_improve_double_lzr(candidate_pipe, similarity_mode, pressure_leak, monitor_p,
                                                       predict_p, normal_p, if_flow, if_only_cos, if_only_flow,
                                                       flow_leak, monitor_f, predict_f, normal_f,
                                                       timestep_list, Top_sensor_num, if_gy, effective_sensor, cos_h,
                                                       dis_h, dis_f_h, if_compalsive, max_flow):
    """Score every candidate leak pipe against the monitored leak signature.

    Per timestep, cosine/distance similarities are computed between each
    candidate's simulated signature and the measurements (pressure always,
    flow when if_flow == 1), then fused into one score per candidate via
    entropy weighting (cal_sq_all_multi). Timestep scores are combined by a
    plain mean (<= 3 timesteps) or a 3-sigma-trimmed mean (> 3 timesteps).

    :param candidate_pipe: candidate leak-center pipe names.
    :param pressure_leak: simulated leak pressures, MultiIndex (pipe, timestep).
    :param flow_leak: simulated leak flows, MultiIndex (pipe, timestep).
    :param monitor_p: measured pressures, indexed by timestep, column per sensor.
    :param predict_p: predicted (no-leak) pressures, same layout.
    :param normal_p: normal-condition pressures, same layout.
    :param effective_sensor: pressure sensors that passed the deviation threshold.
    :param cos_h: previous cosine entropy weight (reused when if_compalsive == 1).
    :param dis_h: previous distance entropy weight.
    :param dis_f_h: previous flow entropy weight.
    :param max_flow: per-flow-sensor normalisation basis.
    :return: (similarity sorted descending, cos_h, dis_h, dis_f_h, break_flag);
        break_flag == 1 means no usable sensor was found and the scalar
        outputs are all zeros.
    """
    similarity = pd.Series(dtype=float, index=candidate_pipe)
    important_p_sensor = cal_top_sensors(monitor_p, predict_p, Top_sensor_num)
    # important_f_sensor, basic_f = cal_top_f_sensor(normal_f)
    important_f_sensor = monitor_f.columns
    if len(important_p_sensor) > 0 or len(important_f_sensor) > 0:  # if len(important_p_sensor) > 0
        break_flag = 0
        # Swap the MultiIndex so the timestep becomes the outer level.
        pressure_leak_new = pressure_leak.swaplevel()
        flow_leak_new = flow_leak.swaplevel()
        total_similarity_cos = pd.DataFrame(index=timestep_list, columns=candidate_pipe)
        total_similarity_dis = pd.DataFrame(index=timestep_list, columns=candidate_pipe)
        total_similarity_dis_f = pd.DataFrame(index=timestep_list, columns=candidate_pipe)
        total_similarity_cos_f = pd.DataFrame(index=timestep_list, columns=candidate_pipe)
        for timestep in timestep_list:
            # cal p_cos, p_dis, f_dis for this timestep
            if if_only_flow != 1:
                pressure_leak_temp = pressure_leak_new.loc[timestep].loc[:, effective_sensor]
                monitor_p_temp = monitor_p.loc[timestep, effective_sensor]
                predict_p_temp = predict_p.loc[timestep, effective_sensor]
                normal_p_temp = normal_p.loc[timestep, effective_sensor]
                total_similarity_cos.loc[timestep, :], total_similarity_dis.loc[timestep, :] = cal_similarity_all_cos_dis(
                    candidate_pipe, pressure_leak_temp, similarity_mode, monitor_p_temp, predict_p_temp,
                    normal_p_temp, pressure_leak_new.loc[timestep].loc[:, monitor_p.columns],
                    monitor_p.loc[timestep, :],
                    predict_p.loc[timestep, :], normal_p.loc[timestep, :],
                    important_p_sensor, if_gy, cos_or_flow=1)
            if if_flow == 1:
                if len(timestep_list) == 1:
                    leak_f_temp = flow_leak_new.loc[timestep].loc[:, important_f_sensor]
                    monitor_f_temp = monitor_f.loc[timestep, important_f_sensor]
                    predict_f_temp = predict_f.loc[timestep, important_f_sensor]
                    normal_f_temp = normal_f.loc[timestep, important_f_sensor]
                    basic_normal_f_temp = abs(max_flow.loc[important_f_sensor])
                    # Normalise all flows by each sensor's max flow magnitude.
                    leak_f_temp = leak_f_temp / basic_normal_f_temp
                    monitor_f_temp = monitor_f_temp / basic_normal_f_temp
                    predict_f_temp = predict_f_temp / basic_normal_f_temp
                    normal_f_temp = normal_f_temp / basic_normal_f_temp
                else:
                    basic_f = abs(max_flow.loc[important_f_sensor])
                    leak_f_temp = flow_leak_new.loc[timestep].loc[:, important_f_sensor] / basic_f
                    monitor_f_temp = monitor_f.loc[timestep, important_f_sensor] / basic_f
                    predict_f_temp = predict_f.loc[timestep, important_f_sensor] / basic_f
                    normal_f_temp = normal_f.loc[timestep, important_f_sensor] / basic_f
                total_similarity_cos_f.loc[timestep, :], total_similarity_dis_f.loc[timestep, :] = cal_similarity_all_cos_dis(
                    candidate_pipe, leak_f_temp, similarity_mode,
                    monitor_f_temp, predict_f_temp, normal_f_temp,
                    flow_leak_new.loc[timestep].loc[:, monitor_f.columns],
                    monitor_f.loc[timestep, :],
                    predict_f.loc[timestep, :],
                    normal_f.loc[timestep, :],
                    important_f_sensor, if_gy,
                    cos_or_flow=1)  # cos_or_flow=2
            else:
                total_similarity_cos_f = []  # dis
        # Fuse cosine/distance/flow channels into one score per candidate.
        similarity_all, cos_h, dis_h, dis_f_h = cal_sq_all_multi(total_similarity_cos, total_similarity_dis,
                                                                 total_similarity_cos_f, candidate_pipe, timestep_list,
                                                                 if_flow, if_only_cos, if_only_flow, cos_h, dis_h,
                                                                 dis_f_h,
                                                                 if_compalsive, len(important_p_sensor),
                                                                 len(important_f_sensor))
        if len(timestep_list) == 1:
            similarity = similarity_all.iloc[0]
        elif len(timestep_list) > 3:
            # Enough samples: 3-sigma-trimmed mean per candidate.
            for each_candidate in candidate_pipe:
                similarity[each_candidate] = remove_3_sigma(similarity_all.loc[:, each_candidate])
        else:
            for each_candidate in candidate_pipe:
                similarity[each_candidate] = similarity_all.loc[:, each_candidate].mean()
        similarity = similarity.sort_values(ascending=False)
    else:
        # No informative sensor at all: signal the caller to fall back.
        break_flag = 1
        similarity = 0
        cos_h = 0
        dis_h = 0
        dis_f_h = 0
    return similarity, cos_h, dis_h, dis_f_h, break_flag
def cal_similarity_all_cos_dis(candidate_pipe, pressure_leak, similarity_mode, monitor_p, predict_p, normal_p,
                               pressure_leak_all, monitor_p_all, predict_p_all, normal_p_all,
                               important_sensor, if_gy, cos_or_flow):
    """Compute cosine and distance similarity of every candidate pipe's
    simulated signature against the measured one, for a single timestep.

    The *_all arguments carry all sensors; the unsuffixed ones are restricted
    to the effective sensors. Candidates that could not be scored
    (none_flag == 1) are rescaled afterwards by adjust().

    :return: (similarity_cos, similarity_dis) Series indexed by candidate pipe.
    """
    similarity_cos = pd.Series(dtype=float, index=candidate_pipe)
    similarity_dis = pd.Series(dtype=float, index=candidate_pipe)
    dpressure = normal_p - pressure_leak
    # unused ----------------------------------------------
    # NOTE(review): the original author marked this std/mean block "useless";
    # its outputs are still passed to cal_similarity_simple_return_dd, so it
    # is kept as-is -- confirm before removing.
    mean_dpressure = dpressure.mean()
    monitor_new = pd.DataFrame(index=['monitor'], columns=monitor_p.index)
    monitor_new.iloc[0] = monitor_p
    add_m_leak_pressure = [pressure_leak, monitor_p]
    add_m_leak_pressure = pd.concat(add_m_leak_pressure)
    pressure_leak_std = np.std(add_m_leak_pressure, ddof=1)
    pressure_leak_std = pd.Series(pressure_leak_std, index=pressure_leak.columns)
    add_m_leak_pressure_all = [pressure_leak_all, monitor_p_all]
    add_m_leak_pressure_all = pd.concat(add_m_leak_pressure_all)
    pressure_leak_std_all = np.std(add_m_leak_pressure_all, ddof=1)
    pressure_leak_std_all = pd.Series(pressure_leak_std_all, index=pressure_leak.columns)
    # unused ----------------------------------------------
    monitor_p_temp = monitor_p
    predict_p_temp = predict_p
    normal_p_temp = normal_p
    monitor_p_temp_all = monitor_p_all
    predict_p_temp_all = predict_p_all
    normal_p_temp_all = normal_p_all
    record_success_candidate = []
    record_success_no_candidate = []
    for i in range(len(candidate_pipe)):
        leak_p = pressure_leak.iloc[i, :]
        leak_p_all = pressure_leak_all.iloc[i, :]
        similarity_cos.iloc[i], similarity_dis.iloc[i], none_flag = cal_similarity_simple_return_dd(
            similarity_mode, monitor_p_temp, predict_p_temp, normal_p_temp, leak_p,
            monitor_p_temp_all, predict_p_temp_all, normal_p_temp_all, leak_p_all,
            important_sensor, mean_dpressure, pressure_leak_std, pressure_leak_std_all, if_gy, cos_or_flow)
        # Track which candidates produced a valid score.
        if none_flag == 0:
            record_success_candidate.append(candidate_pipe[i])
        else:
            record_success_no_candidate.append(candidate_pipe[i])
    similarity_cos, similarity_dis = adjust(similarity_cos, similarity_dis, record_success_candidate,
                                            record_success_no_candidate)
    return similarity_cos, similarity_dis
def cal_top_f_sensor(normal_f):
    """Select flow sensors whose mean absolute flow exceeds a noise floor.

    :param normal_f: normal-condition flow; DataFrame (time x sensor) or a
        Series indexed by sensor.
    :return: (list of sensor names kept, Series of their mean flows).
    """
    # isinstance replaces the fragile `type(...) == pd.core.frame.DataFrame`
    # comparison, which relied on a private pandas module path.
    if isinstance(normal_f, pd.DataFrame):
        mean_f = normal_f.mean()
    else:
        mean_f = normal_f
    output_sensor = []
    output_normal_f = pd.Series(dtype=object)
    # 0.01 m3/h expressed in m3/s -- anything smaller is treated as noise.
    threshold = 0.01 / 3600
    for sensor, value in mean_f.items():
        if abs(value) > threshold:
            output_sensor.append(sensor)
            output_normal_f[sensor] = value
    return output_sensor, output_normal_f
def cal_top_sensors(monitor_p, predict_p, Top_sensor_num):
    """Return the Top_sensor_num sensor names with the largest (mean)
    absolute pressure deviation between prediction and measurement."""
    deviation = abs(predict_p - monitor_p)
    if isinstance(deviation, pd.DataFrame):
        # Collapse the time axis to one mean deviation per sensor.
        deviation = deviation.mean()
    ranked = deviation.sort_values(ascending=False)
    return list(ranked.index[:Top_sensor_num])
def remove_3_sigma(similarity_t):
    """Mean of the series after discarding values outside mean ± 3·std.

    The unused `all_sample` / `apart_sample` locals of the old version were
    dead code and have been removed.

    :param similarity_t: pandas Series of similarity values (any numeric dtype).
    :return: mean of the values lying within three standard deviations.
    """
    similarity = similarity_t.astype('float')
    mean_t = similarity.mean()
    std_t = similarity.std()
    # Keep only in-range samples, then average them.
    filtered = similarity[(similarity <= mean_t + 3 * std_t) & (similarity >= mean_t - 3 * std_t)]
    return filtered.mean()
## other functions
# OF 1
def cal_pipe_coordinate(all_pipe, pipe_start_node, pipe_end_node, node_coordinates):
    """Compute each pipe's midpoint coordinates from its two end nodes.

    The old version allocated a `pipe_coordinates` numpy array that was never
    used; that dead allocation has been removed.

    :param all_pipe: pipe names to process.
    :param pipe_start_node: mapping pipe -> start node.
    :param pipe_end_node: mapping pipe -> end node.
    :param node_coordinates: mapping node -> (x, y).
    :return: (pipe_x, pipe_y) midpoint mappings (same container type as
        pipe_start_node).
    """
    pipe_x = copy.deepcopy(pipe_start_node)
    pipe_y = copy.deepcopy(pipe_start_node)
    for pipe in all_pipe:
        start_xy = node_coordinates[pipe_start_node[pipe]]
        end_xy = node_coordinates[pipe_end_node[pipe]]
        pipe_x[pipe] = (start_xy[0] + end_xy[0]) / 2
        pipe_y[pipe] = (start_xy[1] + end_xy[1]) / 2
    return pipe_x, pipe_y
# OF 1+
def cal_node_coordinate(all_node, node_coordinates):
    """Split node coordinates into separate x and y mappings.

    :param all_node: iterable of node names to extract.
    :param node_coordinates: mapping node -> (x, y) coordinate pair.
    :return: (node_x, node_y) mappings covering every node in all_node.
    """
    node_x = copy.deepcopy(node_coordinates)
    node_y = copy.deepcopy(node_coordinates)
    # Iterate over all_node itself: the old `range(len(node_x))` indexed
    # all_node with the length of node_coordinates, raising IndexError
    # whenever node_coordinates contained more entries than all_node.
    for temp_node in all_node:
        node_x[temp_node] = node_coordinates[temp_node][0]
        node_y[temp_node] = node_coordinates[temp_node][1]
    return node_x, node_y
# OF 4
def cal_signature_pipe_multi_pf(wn, leak_mag, candidate_center, timestep_list, sensor_name, sensor_f_name):
    """Build leak signatures (sensor pressures and flows) for every candidate
    center pipe by running one leak simulation per candidate.

    :param wn: wntr network model; mutated by the simulation helper.
    :param leak_mag: leak magnitude handed to leak_simulation_pipe_dd_multi_pf.
    :param candidate_center: candidate leak-center pipe names.
    :param timestep_list: timesteps forming the inner index level.
    :param sensor_name: pressure-sensor columns.
    :param sensor_f_name: flow-sensor columns.
    :return: (pressure_leak, flow_leak, candidate_center); both frames are
        indexed by the MultiIndex (center pipe, timestep).
    """
    candidate_center_num = len(candidate_center)
    pressure_leak = pd.DataFrame(index=pd.MultiIndex.from_product([candidate_center, timestep_list]),
                                 columns=sensor_name)
    flow_leak = pd.DataFrame(index=pd.MultiIndex.from_product([candidate_center, timestep_list]),
                             columns=sensor_f_name)
    pressure_leak = pressure_leak.sort_index()
    flow_leak = flow_leak.sort_index()
    for i in range(candidate_center_num):
        # One leak simulation per candidate center.
        wn, pressure_output, flow_output = leak_simulation_pipe_dd_multi_pf(wn, leak_mag, candidate_center[i],
                                                                            sensor_name, sensor_f_name)
        # leak_or_not_list.append(leak_or_not)
        pressure_leak.loc[candidate_center[i]].loc[:, :] = pressure_output
        flow_leak.loc[candidate_center[i]].loc[:, :] = flow_output
        sys.stdout.write('\r' + '已经完成计算' + str(i + 1) + '个特征中心')
    return pressure_leak, flow_leak, candidate_center
# OF 6: infer candidate burst pipes and the minimum burst diameter from the leak flow
def cal_possible_pipe(leak_flow, all_pipe, pipe_diameter):
    """Estimate the smallest pipe diameter able to pass `leak_flow` through an
    orifice, then keep only pipes at least that large.

    :param leak_flow: estimated burst outflow.
    :param all_pipe: candidate pipe names.
    :param pipe_diameter: mapping pipe -> diameter (m).
    :return: (filtered pipe list, minimum burst diameter in metres).
    """
    basic_pressure = 10  # assumed service head at the orifice (m H2O)
    discharge_coeff = 0.6  # empirical orifice discharge coefficient
    break_area_ratio = 1  # burst-area-to-pipe-area ratio (0.5 / 1.25 also tried)
    # Orifice equation: Q = Cd * A * sqrt(2 g H)  =>  A = Q / (Cd * sqrt(2 g H))
    break_area = leak_flow / (discharge_coeff * math.sqrt(2 * basic_pressure * 9.81))
    # Equivalent circular diameter of the burst opening.
    break_area_diameter = math.sqrt(4 * break_area / math.pi / break_area_ratio)
    min_diameter = (math.ceil(1000 * break_area_diameter)) / 1000  # round up to mm
    new_all_pipe = pick_pipe(all_pipe, pipe_diameter, min_diameter)
    return new_all_pipe, min_diameter
# Press the green button in the gutter to run the script.
def extract_links(data, link_types, direction):
    """Flatten the `direction` link lists of every requested link type across
    all result entries in `data`.

    :param data: mapping whose values are dicts {link_type: {direction: [links]}}.
    :param link_types: link-type keys to collect.
    :param direction: direction key inside each link type.
    :return: one flat list of links, preserving iteration order.
    """
    collected = []
    for res_data in data.values():
        for link_type in link_types:
            collected.extend(res_data[link_type][direction])
    return collected
def add_noise_pd(data, noise_type, noise_para):
    """Return a noisy copy of `data`; the input object is never modified.

    :param data: pandas Series or DataFrame.
    :param noise_type: 'uni' for uniform noise in [-noise_para, noise_para],
        'gauss' for N(0, noise_para); anything else returns an unchanged copy.
    :param noise_para: noise amplitude (uniform half-width or gaussian std).
    :return: copy of `data` with additive noise applied.
    """
    output_data = copy.deepcopy(data)
    # isinstance replaces the fragile `type(...) == pd.core.frame.Series`
    # comparison, which depended on a private pandas module path.
    if isinstance(output_data, pd.Series):
        if noise_type == 'uni':
            for x in output_data.index:
                noise = (np.random.random() - 0.5) * 2
                output_data[x] = output_data[x] + noise * noise_para
        elif noise_type == 'gauss':
            noise = np.random.normal(loc=0, scale=noise_para, size=output_data.shape)
            output_data = output_data + noise
    elif isinstance(output_data, pd.DataFrame):
        if noise_type == 'uni':
            noise = (np.random.random(size=output_data.shape) - 0.5) * 2
            output_data = output_data + noise * noise_para
        elif noise_type == 'gauss':
            noise = np.random.normal(loc=0, scale=noise_para, size=output_data.shape)
            output_data = output_data + noise
    return output_data
def add_noise_number(data, noise_type, noise_para):
    """Return `data` plus one random perturbation.

    'uni' draws from the uniform range [-noise_para, noise_para]; 'gauss'
    draws from N(0, noise_para); any other type leaves the value unchanged.
    """
    noisy = copy.deepcopy(data)
    if noise_type == 'uni':
        perturbation = (np.random.random() - 0.5) * 2
        noisy = noisy + perturbation * noise_para
    elif noise_type == 'gauss':
        noisy = noisy + random.gauss(0, noise_para)
    return noisy
def add_noise_number_flow(data, noise_para_mean, noise_para_std1, noise_para_std2):
    """Add an asymmetric, randomly-signed flow-meter error to `data`.

    The error magnitude is noise_para_mean shifted down by |N(0, std1)| or up
    by |N(0, std2)| (each with probability 1/2); its sign is then flipped
    with probability 1/2.
    """
    result = copy.deepcopy(data)
    shift_down = (np.random.random() - 0.5) < 0
    if shift_down:
        magnitude = noise_para_mean - abs(np.random.normal(loc=0, scale=noise_para_std1))
    else:
        magnitude = noise_para_mean + abs(np.random.normal(loc=0, scale=noise_para_std2))
    flip_sign = (np.random.random() - 0.5) < 0
    signed_noise = magnitude * (-1) if flip_sign else magnitude
    return result + signed_noise
def produce_noise_number(noise_type, noise_para):
    """Draw one noise sample: uniform in [-noise_para, noise_para] for 'uni',
    N(0, noise_para) for 'gauss', and 0 for any other type."""
    if noise_type == 'uni':
        return (np.random.random() - 0.5) * 2 * noise_para
    if noise_type == 'gauss':
        return random.gauss(0, noise_para)
    return 0
def add_noise_percentage_pd(data, noise_type, noise_para):
    """Return a copy of `data` scaled by multiplicative percentage noise.

    Each element is multiplied by (1 + noise / 100), where noise is uniform
    in [-noise_para, noise_para] ('uni') or N(0, noise_para) ('gauss').

    :param data: pandas Series or DataFrame.
    :param noise_type: 'uni' or 'gauss'; anything else returns an unchanged copy.
    :param noise_para: noise amplitude, interpreted as a percentage.
    :return: noisy copy of `data`; the input is not modified.
    """
    output_data = copy.deepcopy(data)
    if isinstance(output_data, pd.Series):
        if noise_type == 'uni':
            for x in output_data.index:
                noise = (np.random.random() - 0.5) * 2
                output_data[x] = output_data[x] * (1 + noise * noise_para / 100)
        elif noise_type == 'gauss':
            for x in output_data.index:
                # Bug fix: the old code called `np.random.gauss`, which does
                # not exist (AttributeError at runtime); np.random.normal
                # provides the intended single gaussian draw.
                noise = np.random.normal(loc=0, scale=noise_para)
                output_data[x] = output_data[x] * (1 + noise / 100)
        # std_noise = noise.std()
    elif isinstance(output_data, pd.DataFrame):
        if noise_type == 'uni':
            noise = (np.random.random(size=output_data.shape) - 0.5) * 2
            output_data = output_data * (1 + noise * noise_para / 100)
        elif noise_type == 'gauss':
            noise = np.random.normal(loc=0, scale=noise_para, size=output_data.shape)
            output_data = output_data * (1 + noise / 100)
        # std_noise = noise.std().mean()
    return output_data
def produce_pattern_value(wn, all_node):
    """Snapshot the base demand of every demand category at every node.

    :param wn: wntr network model (deep-copied; the input is not modified).
    :param all_node: node names; all are assumed to have as many demand
        categories as the first one.
    :return: DataFrame indexed by node with columns D0..D{k-1} holding each
        demand timeseries' base_value.
    """
    wn_copy = copy.deepcopy(wn)
    first_node = wn_copy.get_node(all_node[0])
    category_count = len(first_node.demand_timeseries_list)
    column_names = [f'D{i}' for i in range(category_count)]
    basic_demand_pd = pd.DataFrame(index=all_node, columns=column_names)
    for node_name in all_node:
        node_obj = wn_copy.get_node(node_name)
        for col_idx in range(category_count):
            basic_demand_pd.loc[node_name, column_names[col_idx]] = node_obj.demand_timeseries_list[col_idx].base_value
    return basic_demand_pd
# Add noise to the network, then record normal- and leak-condition sensor data
def add_noise_in_wn_pf(wn, pipe_c_noise, timestep_list, pipe_coefficient, sensor_name, sensor_f_name, all_node,
                       basic_demand_pd,
                       noise_type, noise_para, leak_pipe, leak_flow):
    """Perturb the model (pipe roughness + nodal demands), then record sensor
    pressures/flows once without and once with a leak on `leak_pipe`.

    The per-timestep demand noise recorded during the normal run is replayed
    for the leak run, so the two datasets differ only by the leak itself.

    :return: (wn, record_pressure, record_flow, record_pressure_leak,
        record_flow_leak); all frames are indexed by timestep.
    """
    wn.options.time.duration = 0
    pipe_roughness_change = add_noise_pd(pipe_coefficient, noise_type, pipe_c_noise)
    wn = change_para_of_wn(wn, pipe_roughness_change)
    record_pressure = pd.DataFrame(index=timestep_list, columns=sensor_name)
    record_flow = pd.DataFrame(index=timestep_list, columns=sensor_f_name)
    record_noise_all = pd.DataFrame(index=pd.MultiIndex.from_product([timestep_list, all_node]),
                                    columns=basic_demand_pd.columns)
    record_noise_all = record_noise_all.sort_index()
    # normal: record monitoring-point data after adding demand noise
    for i in range(len(timestep_list)):
        wn, record_noise = change_node_demand(wn, basic_demand_pd, all_node, noise_type, noise_para)
        record_noise_all.loc[timestep_list[i]].loc[:, :] = record_noise
        pressure_temp, flow_temp = simple_simulation_pf(wn, sensor_name, sensor_f_name, [],
                                                        [])
        record_pressure.iloc[i, :] = pressure_temp
        record_flow.iloc[i, :] = flow_temp
    # leak_simulation: record monitoring-point data with the leak applied
    record_pressure_leak = pd.DataFrame(index=timestep_list, columns=sensor_name)
    record_flow_leak = pd.DataFrame(index=timestep_list, columns=sensor_f_name)
    # modified_wz_________________________________________
    # add leak
    wn, whole_inf, add_pipe1 = simple_add_leak(wn, leak_flow, leak_pipe)
    # simulation: replay the exact demand noise recorded in the normal run
    for i in range(len(timestep_list)):
        record_noise = record_noise_all.loc[timestep_list[i]]
        wn = change_node_demand_leak(wn, record_noise, all_node)
        pressure_temp, flow_temp = simple_simulation_pf(wn, sensor_name, sensor_f_name,
                                                        leak_pipe, add_pipe1)
        record_pressure_leak.iloc[i, :] = pressure_temp
        record_flow_leak.iloc[i, :] = flow_temp
    # delete leak: restore the model to its pre-leak state
    wn = simple_recover_wn(wn, whole_inf)
    return wn, record_pressure, record_flow, record_pressure_leak, record_flow_leak
def change_node_demand(wn, basic_demand_pd, all_node, noise_type, noise_para):
    """Apply multiplicative noise to every node's demand categories except the
    last one, writing the noisy values into the model.

    The last demand category is deliberately kept at its base value.

    :param basic_demand_pd: per-node base demands, one column per category.
    :return: (wn, record_noise) where record_noise holds the demand values
        actually written, indexed like basic_demand_pd.
    """
    # modified_wz_____________________________________
    record_noise = pd.DataFrame(index=all_node, columns=basic_demand_pd.columns)
    for each_node in all_node:
        node = wn.get_node(each_node)
        num_columns = len(basic_demand_pd.columns)
        # Noise the first N-1 columns (if any).
        for i in range(num_columns - 1):
            # NOTE(review): `record_noise.loc[each_node].iloc[i] = ...` is
            # chained indexing; it appears to work because the frame is a
            # single object-dtype block, but under pandas Copy-on-Write the
            # write may not reach record_noise -- verify before upgrading.
            record_noise.loc[each_node].iloc[i] = (1 + produce_noise_number(noise_type, noise_para)) * \
                                                  basic_demand_pd.loc[each_node].iloc[i]
            node.demand_timeseries_list[i].base_value = record_noise.loc[each_node].iloc[i]
        # Keep the last column unchanged (when there is at least one column).
        if num_columns >= 1:
            last_col = basic_demand_pd.columns[-1]
            original_last = basic_demand_pd.loc[each_node, last_col]
            record_noise.loc[each_node, last_col] = original_last
            node.demand_timeseries_list[-1].base_value = original_last
    return wn, record_noise
def change_node_demand_leak(wn, record_noise, all_node):
    """Write previously recorded (noisy) demand base values back onto every
    node's demand categories, so the leak run reuses identical demands.

    :param wn: wntr network model, modified in place and returned.
    :param record_noise: DataFrame of demand values, indexed by node, one
        column per demand category.
    :param all_node: node names to update.
    """
    # All nodes are assumed to share the first node's category count.
    category_count = len(wn.get_node(all_node[0]).demand_timeseries_list)
    for node_name in all_node:
        node_obj = wn.get_node(node_name)
        for cat_idx in range(category_count):
            node_obj.demand_timeseries_list[cat_idx].base_value = record_noise.loc[node_name].iloc[cat_idx]
    return wn
def cal_group_num(candidate_node_input, cal_group_num):
    """Pick the partition count: use the configured value (`cal_group_num`,
    e.g. 30) for large candidate sets (> 100), otherwise fall back to 10."""
    return cal_group_num if len(candidate_node_input) > 100 else 10
def area_output_num_ki_improve(candidate_center, candidate_group, similarity, new_all_node,
                               top_group_ratio, top_pipe_num_max, top_pipe_num_min, cut_ratio):
    """Assemble the next search area from the best-ranked candidate groups.

    The top `top_group_ratio` fraction (rescaled by cut_ratio) of ranked
    centers seeds the area; if that is still too small, further centers are
    appended one by one until the pipe count lands inside
    [top_pipe_num_min, top_pipe_num_max] or the overshoot/undershoot
    trade-off is decided.

    :param candidate_center: group center per group (may contain repeats).
    :param candidate_group: pipe list per group, aligned with candidate_center.
    :param similarity: center scores, sorted descending.
    :param new_all_node: node list per group, aligned with candidate_center.
    :param cut_ratio: fraction of centers kept by the extra_judge pre-filter.
    :return: (final_area pipes, final_center list, node list for the next
        iteration, if_end flag -- 1 when the search may stop).
    """
    final_area = []
    final_center = []
    all_node_iter = []
    # Duplicated centers are counted once.
    if similarity.index.is_unique == False:
        total_center_num = len(list(set(similarity.index)))
    else:
        total_center_num = len(similarity.index)
    next_group_num = min(total_center_num, math.ceil(total_center_num / cut_ratio * top_group_ratio))
    for i in range(next_group_num):
        top_center = similarity.index[i]
        # A center may head several groups; merge all of them.
        top_center_index = find_list_repeat(candidate_center, top_center)
        for j in range(len(top_center_index)):
            final_area = final_area + candidate_group[top_center_index[j]]
            all_node_iter = all_node_iter + list(new_all_node[top_center_index[j]])
        final_center.append(top_center)
    final_area = list(set(final_area))
    if len(final_area) > top_pipe_num_max:
        # Area still too large: keep iterating.
        if_end = 0
    elif len(final_area) > top_pipe_num_min:
        if_end = 1
    elif total_center_num == next_group_num:
        # Nothing left to add.
        if_end = 1
    else:
        # Area too small: greedily append lower-ranked groups until the pipe
        # count enters the target window, or accept the lesser of
        # overshoot/undershoot.
        if_end = 1
        for i in np.arange(next_group_num, total_center_num, 1):
            before_list = copy.deepcopy(final_area)
            top_center = similarity.index[i]
            top_center_index = candidate_center.index(top_center)
            temp_group = final_area + candidate_group[top_center_index]
            temp_area = list(set(temp_group))
            if len(temp_area) < top_pipe_num_min:
                final_center.append(top_center)
                all_node_iter = all_node_iter + list(new_all_node[top_center_index])
                final_area = temp_area
            elif len(temp_area) < top_pipe_num_max:
                final_center.append(top_center)
                all_node_iter = all_node_iter + list(new_all_node[top_center_index])
                final_area = temp_area
                break
            else:
                # a = overshoot if we accept; b = undershoot if we revert.
                a = len(temp_area) - top_pipe_num_max
                b = top_pipe_num_min - len(before_list)
                if a >= b:
                    final_area = before_list
                else:
                    final_center.append(top_center)
                    all_node_iter = all_node_iter + list(new_all_node[top_center_index])
                    final_area = temp_area
                break
    final_center = list(set(final_center))
    all_node_iter = list(set(all_node_iter))
    return final_area, final_center, all_node_iter, if_end
def find_list_repeat(candidate_center, target):
    """Return every index at which `target` occurs in `candidate_center`."""
    return [idx for idx, item in enumerate(candidate_center) if item == target]
def change_para_of_wn(wn, pipe_roughness_change):
    """Overwrite every pipe's roughness with the value from the given
    per-pipe mapping and return the modified model."""
    for name, pipe_obj in wn.pipes():
        pipe_obj.roughness = pipe_roughness_change[name]
    return wn
def cal_DtoTop1(G0, pipe_leak, located_pipe, pipe_start_node_all, pipe_end_node_all, pipe_length):
    """Distance metrics between the true leak pipe and the located pipe.

    Computes the shortest network distance (edge-weighted, plus half of each
    pipe's own length) and the hop count between the two pipes' end nodes.

    :param G0: networkx graph of the network (edge attribute 'weight').
    :param pipe_leak: name of the true leak pipe.
    :param located_pipe: name of the pipe reported by the localisation.
    :param pipe_start_node_all: mapping pipe -> start node.
    :param pipe_end_node_all: mapping pipe -> end node.
    :param pipe_length: mapping pipe -> length.
    :return: (result_DtoTop1 distance, result_DtoTop1_num hop count).
    """
    if pipe_leak == located_pipe:
        result_DtoTop1 = 0
        result_DtoTop1_num = 0
    else:
        pipe_leak_start_node = pipe_start_node_all[pipe_leak]
        pipe_leak_end_node = pipe_end_node_all[pipe_leak]
        located_pipe_start_node = pipe_start_node_all[located_pipe]
        located_pipe_end_node = pipe_end_node_all[located_pipe]
        DtoTop1_series = pd.Series(dtype=object)
        DtoTop1_num_series = pd.Series(dtype=object)
        # Weighted distances between all four end-node pairings.
        DtoTop1_series['ss'] = nx.shortest_path_length(G0, pipe_leak_start_node, located_pipe_start_node,
                                                       weight='weight')
        DtoTop1_series['se'] = nx.shortest_path_length(G0, pipe_leak_start_node, located_pipe_end_node, weight='weight')
        DtoTop1_series['es'] = nx.shortest_path_length(G0, pipe_leak_end_node, located_pipe_start_node, weight='weight')
        DtoTop1_series['ee'] = nx.shortest_path_length(G0, pipe_leak_end_node, located_pipe_end_node, weight='weight')
        # Unweighted hop counts for the same pairings.
        DtoTop1_num_series['ss'] = nx.shortest_path_length(G0, pipe_leak_start_node, located_pipe_start_node)
        DtoTop1_num_series['se'] = nx.shortest_path_length(G0, pipe_leak_start_node, located_pipe_end_node)
        DtoTop1_num_series['es'] = nx.shortest_path_length(G0, pipe_leak_end_node, located_pipe_start_node)
        DtoTop1_num_series['ee'] = nx.shortest_path_length(G0, pipe_leak_end_node, located_pipe_end_node)
        if DtoTop1_num_series.min() == 0:
            # The pipes share a node: one hop, average the weighted span.
            result_DtoTop1_num = 1
            result_DtoTop1 = DtoTop1_series.max() / 2
        else:
            result_DtoTop1_num = DtoTop1_num_series.min() + 1
            # Bug fix: the old code did `DtoTop1_series[DtoTop1_series.argmin()]`,
            # indexing a label-indexed Series with a positional int -- deprecated
            # in pandas 2.x and removed in pandas 3. `.min()` is the same value.
            result_DtoTop1 = DtoTop1_series.min() + (pipe_length[pipe_leak] + pipe_length[located_pipe]) / 2
    return result_DtoTop1, result_DtoTop1_num
def cal_RR(located_pipe, similarity_sp):
    """Relative rank of `located_pipe` within the similarity ranking
    (0 = top). Pipes absent from the ranking get the penalty value 1.1."""
    if located_pipe not in similarity_sp.index:
        return 1.1
    position = similarity_sp.index.get_loc(located_pipe)
    return position / len(similarity_sp.index)
def cal_cover(similarity, leak_pipe):
    """Return 1 if the true leak pipe appears in the candidate ranking,
    otherwise 0."""
    return 1 if leak_pipe in list(similarity.index) else 0
def cal_SD(located_pipe, real_pipe, pipe_x, pipe_y):
    """Euclidean distance between the located pipe's midpoint and the real
    leak pipe's midpoint."""
    delta_x = pipe_x[located_pipe] - pipe_x[real_pipe]
    delta_y = pipe_y[located_pipe] - pipe_y[real_pipe]
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
def update_similarity(leak_candidate_center, similarity, leak_center_dict):
    """Collapse similarities onto the current candidate centers.

    Each center inherits the maximum similarity over its associated candidate
    centers; a center standing alone keeps its own value (averaged if the
    similarity index repeats it). Returns a descending-sorted Series.
    """
    collapsed = pd.Series(dtype=object)
    for center in leak_candidate_center:
        associated = leak_center_dict[center]
        if len(associated) > 1:
            collapsed[center] = similarity[associated].max()
        else:
            own = similarity[center]
            # A duplicated index returns a Series; average it in that case.
            collapsed[center] = own.mean() if isinstance(own, pd.Series) else own
    return collapsed.sort_values(ascending=False)
def extra_judge(similarity):
    """Keep only entries whose value is at or above the mean similarity.

    :return: (cut_ratio, filtered Series) where cut_ratio is the kept
        fraction of the original entries.
    """
    threshold = float(similarity.mean())
    kept_names = []
    kept_values = []
    for name, value in zip(similarity.index, similarity):
        if value >= threshold:
            kept_names.append(name)
            kept_values.append(value)
    filtered = pd.Series(kept_values, index=kept_names, dtype=object)
    cut_ratio = len(filtered.index) / len(similarity.index)
    return cut_ratio, filtered
def adjust_ratio(similarity_mode, cos_h, dis_h, dis_f_h, low_limit=0.1):
    """Clamp entropy weights so no active channel of the chosen similarity
    mode drops below `low_limit`, renormalising the rest to keep the sum 1.

    :param similarity_mode: 'CAF', 'CAD_new_gy' or 'CDF'; other modes pass
        the weights through untouched.
    :param low_limit: floor applied to each active weight.
    :return: adjusted (cos_h, dis_h, dis_f_h).
    """
    if similarity_mode == 'CAF':
        # Two active channels: cosine and flow.
        if cos_h < low_limit:
            cos_h = low_limit
            dis_f_h = 1 - cos_h
        elif dis_f_h < low_limit:
            dis_f_h = low_limit
            cos_h = 1 - dis_f_h
    elif similarity_mode == 'CAD_new_gy':
        # Two active channels: distance check takes precedence over cosine.
        if dis_h < low_limit:
            dis_h = low_limit
            cos_h = 1 - dis_h
        elif cos_h < low_limit:
            cos_h = low_limit
            dis_h = 1 - cos_h
    elif similarity_mode == 'CDF':
        # Three active channels: floor each low one, rescale the remainder.
        weights = [cos_h, dis_h, dis_f_h]
        free = [0, 1, 2]
        for pos in range(3):
            if weights[pos] < low_limit:
                weights[pos] = low_limit
                free.remove(pos)
        if len(free) == 1:
            weights[free[0]] = weights[free[0]] - (sum(weights) - 1)
        elif len(free) == 2:
            scale = 1 - (sum(weights) - 1) / (weights[free[0]] + weights[free[1]])
            weights[free[0]] = weights[free[0]] * scale
            weights[free[1]] = weights[free[1]] * scale
        cos_h, dis_h, dis_f_h = weights[0], weights[1], weights[2]
    return cos_h, dis_h, dis_f_h
# DAND
def DN_search_multi_simple_add_flow_count_new(wn, G0, all_node, node_x, node_y, pipe_start_node_all, pipe_end_node_all,
                                              couple_node_length, node_pipe_dic, all_node_series,
                                              top_group_ratio, top_pipe_num_max, top_pipe_num_min,
                                              candidate_pipe_input_initial,
                                              similarity_mode, pressure_monitor, pressure_predict, pressure_normal,
                                              pressure_leak_all,
                                              flow_monitor, flow_predict, flow_normal, flow_leak_all,
                                              timestep_list, max_flow, group_basic_num, Top_sensor_num, if_gy,
                                              pressure_threshold):
    """Iterative divide-and-narrow search locating the most likely burst pipe.

    Each iteration partitions the candidates into groups
    (metis_grouping_pipe_weight), scores the group centers against the
    measurements (cal_similarity_all_multi_new_sq_improve_double_lzr) and
    shrinks the candidate area (area_output_num_ki_improve); once the area is
    small enough, every remaining pipe is scored individually.

    :param wn: water network model
    :param G0: network graph
    :param all_node: node list
    :param node_x: node x-coordinate mapping (dict or Series)
    :param node_y: node y-coordinate mapping (dict or Series)
    :param pipe_start_node_all: dict pipe -> start node
    :param pipe_end_node_all: dict pipe -> end node
    :param couple_node_length: dict
    :param node_pipe_dic: dict node -> adjacent pipes
    :param all_node_series: pandas Series
    :param top_group_ratio: numeric
    :param top_pipe_num_max: numeric
    :param top_pipe_num_min: numeric
    :param candidate_pipe_input_initial: initial candidate pipe list
    :param similarity_mode: string
    :param pressure_monitor: measured pressures
    :param pressure_predict: predicted pressures
    :param pressure_normal: normal-condition pressures
    :param pressure_leak_all: simulated leak pressures for all candidates
    :param timestep_list: list of timesteps
    :param max_flow: Series or dict
    :param group_basic_num: numeric
    :param Top_sensor_num: int
    :param if_gy: flag (int or bool)
    :param pressure_threshold: numeric
    :return: (top-ranked pipe name, elapsed seconds, simulation count,
        updated wn, final similarity ranking as a pandas Series)
    """
    iter_count = 0
    all_node_iter = copy.deepcopy(all_node)
    candidate_pipe_input = copy.deepcopy(candidate_pipe_input_initial)  # candidate leak pipes
    t1 = datetime.now()
    if_flow, if_only_cos, if_only_flow = decode_mode(similarity_mode)  # localisation-method flags
    # threshold: keep only sensors with an informative deviation
    if if_only_flow == 1:
        dpressure = (flow_predict - flow_monitor).mean()
        dpressure = np.abs(dpressure)
        effective_sensor = list(dpressure.index)
    else:
        dpressure = (pressure_predict - pressure_monitor).mean()
        dpressure = np.abs(dpressure)
        dpressure = dpressure[dpressure > pressure_threshold]
        effective_sensor = list(dpressure.index)
    simulation_times = 0  # number of leak simulations consumed
    if len(dpressure) > 0:
        cos_h = 0
        dis_h = 0
        dis_f_h = 0
        if_compalsive = 0
        record_center_dataset = []
        # iterate: group -> score -> shrink
        while 1:
            final_area = []
            final_center = []
            group_num = cal_group_num(candidate_pipe_input, group_basic_num)
            # grouping: derive the candidate leak centers
            candidate_center_list, candidate_group_list, new_all_node = metis_grouping_pipe_weight(G0, wn,
                                                                                                  all_node_iter,
                                                                                                  candidate_pipe_input,
                                                                                                  group_num, node_x,
                                                                                                  node_y,
                                                                                                  pipe_start_node_all,
                                                                                                  pipe_end_node_all,
                                                                                                  node_pipe_dic,
                                                                                                  all_node_series,
                                                                                                  couple_node_length)
            simulation_times = simulation_times + len(candidate_center_list)
            # pick_pressure_leak
            pressure_leak = pressure_leak_all.loc[candidate_center_list].loc[:, :]
            flow_leak = flow_leak_all.loc[candidate_center_list].loc[:, :]
            # pressure_leak_f= pressure_leak.swaplevel()
            # --------------------------------------------------------
            # Reuse previously scored centers that fall inside a group.
            add_center = []
            leak_center_dict = dict()
            for i in range(len(candidate_center_list)):
                houxuan_center = []
                for each_center in record_center_dataset:
                    if each_center in candidate_group_list[i] and each_center != candidate_center_list[i]:
                        houxuan_center.append(each_center)
                add_center = add_center + houxuan_center
                houxuan_center.append(candidate_center_list[i])
                leak_center_dict[candidate_center_list[i]] = houxuan_center
            for each_center in candidate_center_list:
                if each_center not in record_center_dataset:
                    record_center_dataset.append(each_center)
            # --------------------------------------------------------
            # --------------------------------------------------------
            if len(add_center) > 0:
                s3 = pressure_leak_all.loc[add_center]
                pressure_leak = pd.concat([pressure_leak, s3])
                s4 = flow_leak_all.loc[add_center]
                flow_leak = pd.concat([flow_leak, s4])
            # --------------------------------------------------------
            # --------------------------------------------------------
            # Near the end game: freeze the entropy weights.
            if len(candidate_pipe_input) < 1.2 * top_pipe_num_max / top_group_ratio:
                if_compalsive = 1
                cos_h, dis_h, dis_f_h = adjust_ratio(similarity_mode, cos_h, dis_h, dis_f_h)
            candidate_center_list_sup = candidate_center_list + add_center
            similarity, cos_h, dis_h, dis_f_h, break_flag = cal_similarity_all_multi_new_sq_improve_double_lzr(
                candidate_center_list_sup, similarity_mode, pressure_leak,
                pressure_monitor, pressure_predict, pressure_normal, if_flow, if_only_cos, if_only_flow,
                flow_leak, flow_monitor, flow_predict, flow_normal,
                timestep_list, Top_sensor_num, if_gy, effective_sensor, cos_h, dis_h, dis_f_h, if_compalsive, max_flow)
            if break_flag == 1:
                break
            new_similarity = update_similarity(candidate_center_list, similarity, leak_center_dict)
            if len(candidate_pipe_input) > top_pipe_num_max / top_group_ratio:
                cut_ratio, new_similarity = extra_judge(new_similarity)
            else:
                cut_ratio = 1
            final_area_t, final_center_t, all_node_new_1, if_end = area_output_num_ki_improve(candidate_center_list,
                                                                                             candidate_group_list,
                                                                                             new_similarity,
                                                                                             new_all_node,
                                                                                             top_group_ratio,
                                                                                             top_pipe_num_max,
                                                                                             top_pipe_num_min,
                                                                                             cut_ratio)
            final_area = final_area + final_area_t
            final_center = final_center + final_center_t
            final_area = list(set(final_area))
            final_center = list(set(final_center))
            if if_end == 1:
                break
            elif len(candidate_pipe_input) == len(final_area):
                # No shrinkage this round: stop iterating.
                break
            else:
                candidate_pipe_input = final_area
                all_node_iter = all_node_new_1
            iter_count += 1
            sys.stdout.write(
                '\r' + '已经完成' + str(iter_count) + '次迭代计算' + '候选节点' + str(len(final_area)) + '')
        if break_flag == 0:
            # Final pass: score every pipe left in the narrowed area.
            final_area_pipe = copy.deepcopy(final_area)
            simulation_times = simulation_times + len(final_area)
            pressure_leak_sp = pressure_leak_all.loc[final_area_pipe].loc[:, :]
            flow_leak_sp = flow_leak_all.loc[final_area_pipe].loc[:, :]
            similarity_sp, cos_h, dis_h, dis_f_h, break_flag = cal_similarity_all_multi_new_sq_improve_double_lzr(
                final_area_pipe, similarity_mode, pressure_leak_sp,
                pressure_monitor, pressure_predict, pressure_normal, if_flow,
                if_only_cos, if_only_flow,
                flow_leak_sp, flow_monitor, flow_predict, flow_normal,
                timestep_list, Top_sensor_num, if_gy, effective_sensor, cos_h, dis_h, dis_f_h, if_compalsive, max_flow)
        else:
            # Fallback: rank each sensor's adjacent pipe by raw deviation.
            dpressure = (pressure_predict - pressure_monitor).mean()
            dpressure = np.abs(dpressure)
            simulation_times = simulation_times + len(dpressure.index)
            similarity_sp = pd.Series(dtype=object)
            for each_node in dpressure.index:
                pipe = node_pipe_dic[each_node][0]
                similarity_sp.loc[pipe] = dpressure.loc[each_node]
            similarity_sp = similarity_sp.sort_values(ascending=False)
            t2 = datetime.now()
            final_area_pipe = []
            sys.stdout.write(
                '\r' + '已经完成' + str(iter_count + 1) + '次迭代计算' + '候选节点' + str(len(final_area_pipe)) + '')
        t2 = datetime.now()
        dt = (t2 - t1).seconds
    else:
        # No sensor exceeded the threshold: fall back to raw-deviation ranking.
        dpressure = (pressure_predict - pressure_monitor).mean()
        dpressure = np.abs(dpressure)
        similarity_sp = pd.Series(dtype=object)
        for each_node in dpressure.index:
            pipe = node_pipe_dic[each_node][0]
            similarity_sp.loc[pipe] = dpressure.loc[each_node]
        similarity_sp = similarity_sp.sort_values(ascending=False)
        t2 = datetime.now()
        dt = (t2 - t1).seconds
    return similarity_sp.index[0], dt, simulation_times, wn, similarity_sp
def decode_mode(similarity_mode):
    """Decode a similarity-mode name into the three method flags.

    :param similarity_mode: one of 'COS', 'CAD_new_gy', 'CDF', 'CAF',
        'DIS', 'OF'.
    :return: tuple (if_flow, if_only_cos, if_only_flow).
    :raises ValueError: for an unknown mode (the old if/elif chain died with
        an UnboundLocalError at the return statement instead).
    """
    mode_flags = {
        'COS': (0, 1, 0),
        'CAD_new_gy': (0, 0, 0),
        'CDF': (1, 0, 0),
        'CAF': (1, 1, 0),
        'DIS': (1, 2, 0),
        'OF': (1, 0, 1),
    }
    try:
        return mode_flags[similarity_mode]
    except KeyError:
        raise ValueError(f'unknown similarity_mode: {similarity_mode!r}') from None
# Select candidate pipes whose diameter can accommodate the burst
def pick_pipe(all_pipes, pipe_diameter, limited_diameter):
    """Return the pipes in `all_pipes` whose diameter is at least
    `limited_diameter`."""
    return [pipe for pipe in all_pipes if pipe_diameter[pipe] >= limited_diameter]
# 2025/6/7
def locate_burst(name: str, pressure_SCADA_ID_list: list[list[str]], burst_SCADA_pressure: pd.Series,
                 normal_SCADA_pressure: pd.Series, flow_SCADA_ID_list: list[list[str]], burst_SCADA_flow: pd.Series,
                 normal_SCADA_flow: pd.Series, burst_leakage: float, burst_happen_time: str, basic_pressure: float = 10.0) -> None:
    """
    Leak/burst localization over the hydraulic model driven by SCADA readings.

    Simulates a leak on every candidate pipe of the wntr model, builds a
    pressure/flow "signature" per pipe, and runs a DN search to rank candidate
    pipes by similarity with the observed (burst vs. normal) SCADA signals.
    Results are printed; the Excel export code below is currently disabled.

    :param name: database name (also used to label the output workbook)
    :param pressure_SCADA_ID_list: sensor layouts - list of lists of pressure SCADA point names
    :param burst_SCADA_pressure: real pressures at burst time, or pressures simulated by the burst scheme
    :param normal_SCADA_pressure: smoothed pressures over the burst period, or pressures under normal operation
    :param flow_SCADA_ID_list: flow SCADA point layouts (not used in this run; kept for interface symmetry)
    :param burst_SCADA_flow: flow readings at burst time
    :param normal_SCADA_flow: flow readings under normal operation
    :param burst_leakage: burst leakage flow rate
    :param burst_happen_time: burst occurrence time, e.g. '2024-11-25T09:00:00+08:00'
    :param basic_pressure: minimum service head (m) the plant guarantees the network.
        Per GB/T 32063-2015 the end-of-network minimum service pressure should be
        at least 0.14 MPa (14 m head); per GB 50974-2014 the network pressure
        during a fire must reach at least 0.10 MPa (10 m head).
    :return: None (side effects: console output; writer is created but the save is disabled)
    """
    # candidate_type
    candidate_type = 'pipe'
    top_group_ratio = 0.4
    top_pipe_num_max = 42  # 25
    top_pipe_num_min = 34  # 35
    # modified
    # similarity_mode_list = ['CDF_inflow','CDF_other','CAD_new_gy','CAF','COS']
    method_rational = 0.5
    # Output path and run-label parameters (edit these per experiment)
    file_fold = './1mypaperdata_f/sensor_opt_validation/'  # './sensor_opt/'
    Method_mode = 'DAND'
    Noise_level = 'N2'
    Leakage_level = 'L02'
    version = 'wz_0513'
    file_path = os.path.join(file_fold, f'{name}' + '_' + Method_mode +
                             '_' + Noise_level + '_' + Leakage_level + '_' + version + '.xlsx')
    directory = os.path.dirname(file_path)
    if not os.path.exists(directory):
        os.makedirs(directory)
    writer = pd.ExcelWriter(file_path)
    # Other basic settings (normally left unchanged)
    candidate_leak_ratio = [float(Leakage_level[1:]) / 10]
    node_d_noise_list = [0, 0.05, 0.1, 0.15]  # /3.27
    pipe_c_noise_list = [0, 5, 10, 15]  # /3.27
    noise_para_monitor_list = [0, 0.1, 0.2, 0.3]  #
    noise_f_para_monitor_list = [0, 1, 2, 5]  # %
    leak_flow_para_mean_list = [0, 0.0125, 0.0125, 0.0125]
    leak_flow_para_std1_list = [0, 0.0125 / 4, 0.0125 / 4, 0.0125 / 4]
    leak_flow_para_std2_list = [0, 0.0025 / 3, 0.0025 / 3, 0.0025 / 3]
    noise_level = int(Noise_level[1:])
    ## other
    # driven_mode = 'DD'
    require_p = 100 * 1.42  # 20mh2o
    minimum_p = 0  # 0
    if_plot = 0
    # Load the original network model and read its static information
    # load_inp builds the wntr water-network model from the normal-operation INP file
    wn_origin = load_inp(name=name, before_burst_time=burst_happen_time)
    # read_inf_inp yields all nodes, node elevations, coordinates, the initially-open
    # candidate pipes, pipe start/end nodes, lengths and diameters
    all_node, node_elevation, node_coordinates, candidate_pipe_init, pipe_start_node, pipe_end_node, pipe_length, pipe_diameter = read_inf_inp(
        wn_origin)
    candidate_pipe_input, min_diameter = cal_possible_pipe(burst_leakage, candidate_pipe_init, pipe_diameter)  # candidate pipes and minimum burst diameter
    node_x, node_y = cal_node_coordinate(all_node, node_coordinates)
    all_link, pipe_start_node_all, pipe_end_node_all = read_inf_inp_other(wn_origin)
    # Node index series and node -> incident-links dictionary
    # Map every node name to an integer index (0, 1, 2, ...) via a pandas Series
    all_node_series = wn_origin.node_name_list
    all_node_series = pd.Series(range(len(all_node_series)), index=all_node_series)
    node_pipe_dic = {node: wn_origin.get_links_for_node(node) for node in wn_origin.node_name_list}
    # Edge-length lookup keyed by "startNode,endNode" in both directions;
    # lengths are scaled x10 and ceiled to integers (links without a length get 1)
    couple_node_length = dict()
    for each_link in all_link:
        start_node = pipe_start_node_all[each_link]
        end_node = pipe_end_node_all[each_link]
        name1 = start_node + ',' + end_node
        name2 = end_node + ',' + start_node
        if each_link in pipe_length.index:
            couple_node_length[name1] = math.ceil(pipe_length[each_link] * 10)
            couple_node_length[name2] = math.ceil(pipe_length[each_link] * 10)
        else:
            couple_node_length[name1] = 1
            couple_node_length[name2] = 1
    # First drop irrelevant nodes directly from all_node (none configured here)
    n_node = []
    all_node = list(set(all_node) - set(n_node))
    # Work out the nodes to exclude: tanks and reservoirs
    except_node_boundary = []
    tank_names = wn_origin.tank_name_list
    reservoir_names = wn_origin.reservoir_name_list
    except_node_construction = list(tank_names + reservoir_names)
    except_node_list = except_node_boundary + except_node_construction
    all_node_temp = list(set(all_node) - set(except_node_boundary))
    all_node_temp = list(set(all_node_temp) - set(except_node_construction))
    all_node_new = all_node_temp
    all_node_pcv = []
    ## Candidate leaking pipes to simulate
    candidate_pipe_input = list(set(candidate_pipe_input))
    candidate_pipe_input_initial = list(set(candidate_pipe_input))
    ## Randomly pick a small sample (>= 2) of candidate pipes as "true" leak positions
    num_to_select = max(2, int(len(candidate_pipe_input_initial) * 0.001))
    leakage_position_list = random.sample(candidate_pipe_input_initial, num_to_select)
    # leakage_position_list = ['ZBBGXSZK001865','ZBBGXSZK002094','ZBBGXSZK002075']
    ## (disabled) collect the inlet/outlet pipes and pumps of every tank and reservoir
    # reservoir_links = {}
    # tank_links = {}
    # for res in reservoir_names:
    #     reservoir_links[res] = {'pipes': {'pos': [], 'neg': []}, 'pumps': {'pos': [], 'neg': []}}
    # for tank in tank_names:
    #     tank_links[tank] = {'pipes': {'pos': [], 'neg': []}, 'pumps': {'pos': [], 'neg': []}}
    # for pipe_name in wn_origin.pipe_name_list:
    #     pipe = wn_origin.get_link(pipe_name)
    #     start_node = pipe.start_node_name
    #     end_node = pipe.end_node_name
    #     # reservoir-connected pipes
    #     if start_node in reservoir_names:
    #         reservoir_links[start_node]['pipes']['pos'].append(pipe_name)
    #     if end_node in reservoir_names:
    #         reservoir_links[end_node]['pipes']['neg'].append(pipe_name)
    #     # tank-connected pipes
    #     if start_node in tank_names:
    #         tank_links[start_node]['pipes']['pos'].append(pipe_name)
    #     if end_node in tank_names:
    #         tank_links[end_node]['pipes']['neg'].append(pipe_name)
    # for pump_name in wn_origin.pump_name_list:
    #     pump = wn_origin.get_link(pump_name)
    #     start_node = pump.start_node_name
    #     end_node = pump.end_node_name
    #     # reservoir-connected pumps
    #     if start_node in reservoir_names:
    #         reservoir_links[start_node]['pumps']['pos'].append(pump_name)
    #     if end_node in reservoir_names:
    #         reservoir_links[end_node]['pumps']['neg'].append(pump_name)
    #     # tank-connected pumps
    #     if start_node in tank_names:
    #         tank_links[start_node]['pumps']['pos'].append(pump_name)
    #     if end_node in tank_names:
    #         tank_links[end_node]['pumps']['neg'].append(pump_name)
    # all_reservoir_pos_links = [
    #     link
    #     for res_data in reservoir_links.values()
    #     for link_type in ['pipes', 'pumps']
    #     for link in res_data[link_type]['pos']
    # ]
    # all_reservoir_neg_links = [
    #     link
    #     for res_data in reservoir_links.values()
    #     for link_type in ['pipes', 'pumps']
    #     for link in res_data[link_type]['neg']
    # ]
    # all_tank_pos_links = [
    #     link
    #     for tan_data in tank_links.values()
    #     for link_type in ['pipes', 'pumps']
    #     for link in tan_data[link_type]['pos']
    # ]
    # all_tank_neg_links = [
    #     link
    #     for tan_data in tank_links.values()
    #     for link_type in ['pipes', 'pumps']
    #     for link in tan_data[link_type]['neg']
    # ]
    # all_tank_pipe_links = [
    #     link
    #     for tan_data in tank_links.values()
    #     for link_type in ['pos', 'neg']
    #     for link in tan_data['pipes'][link_type]
    # ]
    # f1 = []
    # f_corr2_f = all_tank_pipe_links
    # _pos = all_reservoir_pos_links + all_tank_pos_links  # outflow
    # _neg = all_reservoir_neg_links + all_tank_neg_links  # inflow
    # f0 = _pos + _neg
    # Sensor layouts to be tested
    ## sensor analyse S5_40 ----------------------------------------------
    sensor_name_list_str = ['test']  # name of each sensor layout
    sensor_name_list = pressure_SCADA_ID_list
    # Localization method per sensor layout; may be set uniformly to 'CAD_new_gy'
    similarity_mode_list = ['COS'] * len(sensor_name_list_str)
    sensor_f_name_list = [[] for _ in range(len(sensor_name_list_str))]  # flow sensors (none in this run)
    # sensor_f_name_list = flow_SCADA_ID_list
    sensor_level_list = list(np.arange(len(sensor_name_list_str)))
    sensor_name = itertools.chain.from_iterable(sensor_name_list)
    sensor_name = list(set(sensor_name))
    sensor_f_name = itertools.chain.from_iterable(sensor_f_name_list)
    sensor_f_name = list(set(sensor_f_name))
    # sensor_f_name = sensor_f_name + f0 + f1 + f_corr2_f
    # sensor_f_name = list(set(sensor_f_name))
    sensor_name_all = copy.deepcopy(sensor_name)
    sensor_f_name_all = copy.deepcopy(sensor_f_name)
    # Result tables: one (layout, leak position) row per evaluation metric
    r2 = pd.DataFrame(
        index=pd.MultiIndex.from_product([sensor_name_list_str, leakage_position_list]), columns=['TD'])
    r3 = pd.DataFrame(
        index=pd.MultiIndex.from_product([sensor_name_list_str, leakage_position_list]), columns=['SD'])
    r4 = pd.DataFrame(
        index=pd.MultiIndex.from_product([sensor_name_list_str, leakage_position_list]), columns=['PD'])
    r5 = pd.DataFrame(
        index=pd.MultiIndex.from_product([sensor_name_list_str, leakage_position_list]), columns=['lpID'])
    r6 = pd.DataFrame(
        index=pd.MultiIndex.from_product([sensor_name_list_str, leakage_position_list]), columns=['Times'])
    r7 = pd.DataFrame(
        index=pd.MultiIndex.from_product([sensor_name_list_str, leakage_position_list]), columns=['Cover'])
    recorded_position = []
    r2 = r2.sort_index()
    r3 = r3.sort_index()
    r4 = r4.sort_index()
    r5 = r5.sort_index()
    r6 = r6.sort_index()
    r7 = r7.sort_index()
    # Localization
    basic_group_num = 10
    # Fixed part below (no modification needed)
    # load network
    G0 = construct_graph(wn_origin)
    # ====================================================================================================================
    pressure_basic, flow_basic, basic_p, top_sensor, base_demand = normal_simulation_pf(wn_origin, sensor_name_all, sensor_f_name_all)
    timestep_list = [0]
    pipe_x, pipe_y = cal_pipe_coordinate(candidate_pipe_input, pipe_start_node, pipe_end_node, node_coordinates)
    ## ------------------------------------------------------------------------------------------------------------
    ## ----------------------------------------------------------------------------
    # get pipe coefficient
    pipe_coefficient = wn_origin.query_link_attribute('roughness', link_type=wntr.network.model.Pipe)
    basic_demand_pd = produce_pattern_value(wn_origin, all_node_new)
    ## ----------------------------------------------------------------------------
    # modified_wz_________________________________________
    # # noise =======================================================================
    # sum_flow = 0
    # for p in _pos:
    #     sum_flow += flow_basic[p]
    # for p in _neg:
    #     sum_flow -= flow_basic[p]
    # sum_flow = sum_flow.mean()
    # # noise
    # node_d_noise = node_d_noise_list[noise_level]  # /3.27
    # pipe_c_noise = pipe_c_noise_list[noise_level]  # /3.27
    # # leak_flow_para = 1 * sum_flow.values[0] * 0.017 /3600
    # leak_flow_para_mean = 1 * sum_flow * leak_flow_para_mean_list[noise_level]
    # leak_flow_para_std1 = 1 * sum_flow * leak_flow_para_std1_list[noise_level]
    # leak_flow_para_std2 = 1 * sum_flow * leak_flow_para_std2_list[noise_level]
    # noise_para_monitor = noise_para_monitor_list[noise_level]  #
    noise_f_para_monitor = noise_f_para_monitor_list[noise_level]  # %
    velocity = 1  # assumed flow velocity (m/s) for the flow-bound estimates -- TODO confirm
    noise_f_max = pipe_diameter ** 2 * math.pi * velocity * noise_f_para_monitor / 400
    max_flow = pipe_diameter ** 2 * math.pi * velocity / 4
    # noise =======================================================================
    # basic leak simulation =======================================================================
    # Simulate a leak on every candidate pipe (pipe-based, not node-based) to obtain
    # each pipe's leak pressure/flow signature at the monitored sensors
    pressure_leak_all_pre_new_all = pd.DataFrame(
        index=pd.MultiIndex.from_product([candidate_leak_ratio, candidate_pipe_input_initial, timestep_list]),
        columns=sensor_name_all)
    pressure_leak_all_pre_new_all = pressure_leak_all_pre_new_all.sort_index()
    flow_leak_all_pre_new_all = pd.DataFrame(
        index=pd.MultiIndex.from_product([candidate_leak_ratio, candidate_pipe_input_initial, timestep_list]),
        columns=sensor_f_name_all)
    flow_leak_all_pre_new_all = flow_leak_all_pre_new_all.sort_index()
    pressure_leak_basic_all = pd.DataFrame(
        index=pd.MultiIndex.from_product([candidate_leak_ratio, candidate_pipe_input_initial]), columns=sensor_name_all)
    for pick_leak_ratio in candidate_leak_ratio:
        # leak_flow_simu = sum_flow * pick_leak_ratio
        leak_flow_simu = burst_leakage
        wn = copy.deepcopy(wn_origin)
        pressure_leak_all_pre, flow_leak_all_pre, leak_candidate_center = cal_signature_pipe_multi_pf(wn,
                                                                                                     leak_flow_simu,
                                                                                                     candidate_pipe_input_initial,
                                                                                                     timestep_list,
                                                                                                     sensor_name_all,
                                                                                                     sensor_f_name_all)
        for each in candidate_pipe_input_initial:
            pressure_leak_all_pre_new_all.loc[pick_leak_ratio].loc[each].loc[:, :] = pressure_leak_all_pre.loc[
                each].loc[:, sensor_name_all]
        for each in candidate_pipe_input_initial:
            flow_leak_all_pre_new_all.loc[pick_leak_ratio].loc[each].loc[:, :] = flow_leak_all_pre.loc[each].loc[:,
                                                                                 sensor_f_name_all]
        for candidate_pipe in candidate_pipe_input_initial:
            pressure_leak_basic_all.loc[pick_leak_ratio].loc[candidate_pipe, :] = pressure_leak_all_pre.loc[
                candidate_pipe].loc[:, :].mean()
    # sensor information =======================================================================
    ## ----------------------------------------------------------------------------
    DEMAND_COUNT = 0
    wn_change = copy.deepcopy(wn_origin)
    wn = copy.deepcopy(wn_origin)
    # ==========================================================================================================
    pressure_basic_basic, flow_basic_basic, basic_p, top_sensor, base_demand = normal_simulation_multi_pf(wn, sensor_name_all, sensor_f_name_all)
    # for each_candidate in leakage_position_list:
    # each_candidate='111'
    # wn = copy.deepcopy(wn_origin)
    # r1 = pd.DataFrame(
    #     index=sensor_name_list_str,
    #     columns=['TD', 'SD', 'PD', 'lpID', 'Times', 'Cover', 'Simu Count'])
    # r1 = r1.sort_index()
    #
    # recorded_position.append(each_candidate)
    # leak_index = int(np.round(random.random() * (len(candidate_leak_ratio) - 1)))
    # leak_flow_simu =candidate_leak_ratio[leak_index] * sum_flow
    # leak_mag = add_noise_number(leak_flow_simu, 'uni', leak_flow_para)
    # wn_change = copy.deepcopy(wn_origin)
    # (disabled) obtain sensor readings with normal noise and with the leak applied at the simulated timestep
    # wn_change, pressure_predict_basic, flow_predict_basic, pressure_output, flow_output = add_noise_in_wn_pf(
    #     wn_change, pipe_c_noise, timestep_list,
    #     pipe_coefficient, sensor_name_all, sensor_f_name_all, all_node_new,
    #     basic_demand_pd,
    #     'uni', node_d_noise,
    #     each_candidate, leak_mag)
    #
    # pressure_monitor_basic = add_noise_pd(pressure_output, 'uni', noise_para_monitor)
    # flow_monitor_basic = add_noise_percentage_pd(flow_output, 'uni', noise_f_para_monitor)
    leak_index = int(np.round(random.random() * (len(candidate_leak_ratio) - 1)))
    leak_mag = leak_flow_simu
    print('leak_mag' + str(leak_mag * 3600) + '.........')
    # Observed / predicted / baseline sensor tables, one row per timestep
    pressure_monitor = pd.DataFrame(index=timestep_list, columns=sensor_name)
    pressure_predict = pd.DataFrame(index=timestep_list, columns=sensor_name)
    pressure_basic = pd.DataFrame(index=timestep_list, columns=sensor_name)
    flow_monitor = pd.DataFrame(index=timestep_list, columns=sensor_f_name)
    flow_predict = pd.DataFrame(index=timestep_list, columns=sensor_f_name)
    flow_basic = pd.DataFrame(index=timestep_list, columns=sensor_f_name)
    for i in range(len(timestep_list)):
        for sensor_level in sensor_level_list:
            sensor_name = sensor_name_list[sensor_level]
            pressure_monitor.iloc[i, :] = burst_SCADA_pressure.loc[sensor_name]
            pressure_predict.iloc[i, :] = normal_SCADA_pressure.loc[sensor_name]
            pressure_basic.iloc[i, :] = pressure_basic_basic.loc[:, sensor_name]
            sensor_f_name = sensor_f_name_list[sensor_level]
            # Restrict the precomputed leak signatures to this layout's sensors
            pressure_leak_all_pre_new = pd.DataFrame(
                index=pd.MultiIndex.from_product([candidate_pipe_input_initial, timestep_list]),  # Cartesian product -> MultiIndex
                columns=sensor_name)
            pressure_leak_all_pre_new = pressure_leak_all_pre_new.sort_index()  # sort the MultiIndex
            flow_leak_all_pre_new = pd.DataFrame(
                index=pd.MultiIndex.from_product([candidate_pipe_input_initial, timestep_list]),
                columns=sensor_f_name)
            flow_leak_all_pre_new = flow_leak_all_pre_new.sort_index()
            for each in candidate_pipe_input_initial:
                pressure_leak_all_pre_new.loc[each].loc[:, :] = pressure_leak_all_pre_new_all.loc[candidate_leak_ratio[leak_index]].loc[each].loc[:,
                                                                sensor_name]
                flow_leak_all_pre_new.loc[each].loc[:, :] = flow_leak_all_pre_new_all.loc[candidate_leak_ratio[leak_index]].loc[each].loc[:,
                                                            sensor_f_name]
            wn = copy.deepcopy(wn_origin)
            # start DN search
            # =================================================================
            use_similarity_mode = similarity_mode_list[sensor_level]
            flow_monitor.iloc[i, :] = burst_SCADA_flow.loc[sensor_f_name]
            flow_predict.iloc[i, :] = normal_SCADA_flow.loc[sensor_f_name]
            flow_basic.iloc[i, :] = flow_basic_basic.loc[:, sensor_f_name]
            # =================================================================
            located_pipe, dt, simu_count, wn, similarity_sp = DN_search_multi_simple_add_flow_count_new(wn, G0,
                                                                                                       all_node,
                                                                                                       node_x,
                                                                                                       node_y,
                                                                                                       pipe_start_node_all,
                                                                                                       pipe_end_node_all,
                                                                                                       couple_node_length,
                                                                                                       node_pipe_dic,
                                                                                                       all_node_series,
                                                                                                       top_group_ratio,
                                                                                                       top_pipe_num_max,
                                                                                                       top_pipe_num_min,
                                                                                                       candidate_pipe_input_initial,
                                                                                                       use_similarity_mode,
                                                                                                       pressure_monitor,
                                                                                                       pressure_predict,
                                                                                                       pressure_basic,
                                                                                                       pressure_leak_all_pre_new,
                                                                                                       flow_monitor,
                                                                                                       flow_predict,
                                                                                                       flow_basic,
                                                                                                       flow_leak_all_pre_new,
                                                                                                       timestep_list,
                                                                                                       max_flow,
                                                                                                       basic_group_num,
                                                                                                       Top_sensor_num=len(
                                                                                                           sensor_name),
                                                                                                       if_gy=0,
                                                                                                       pressure_threshold=0.05)
            print(located_pipe, dt, simu_count, similarity_sp)
            # (disabled) evaluation / Excel export of the localization result
            # if len(list(similarity_sp.index)) == 0:
            #     SD = 9999
            #     TD = 9999
            #     PD = 90
            #     lpID = ''
            #     cover = 0
            # else:
            #     SD = cal_SD(located_pipe, each_candidate, pipe_x, pipe_y)
            #     TD, PD = cal_DtoTop1(G0, each_candidate, located_pipe, pipe_start_node_all, pipe_end_node_all,
            #                          pipe_length)
            #     lpID = located_pipe
            #     cover = cal_cover(similarity_sp, each_candidate)
            # r1.loc[sensor_name_list_str[sensor_level]].loc['TD'] = TD  # topological distance of the located pipe
            # r1.loc[sensor_name_list_str[sensor_level]].loc['SD'] = SD  # spatial distance of the located pipe
            # r1.loc[sensor_name_list_str[sensor_level]].loc['lpID'] = lpID  # unimportant; may be removed
            # r1.loc[sensor_name_list_str[sensor_level]].loc['Times'] = dt  # localization time
            # r1.loc[sensor_name_list_str[sensor_level]].loc['Cover'] = cover  # coverage
            # r1.loc[sensor_name_list_str[sensor_level]].loc['PD'] = PD  # topological connection count
            # r1.loc[sensor_name_list_str[sensor_level]].loc['Simu Count'] = simu_count  # simulation count
            #
            # # --- top-10 pipes for the current candidate ---
            # top_10_pipes = similarity_sp.head(10)
            # df_to_export = pd.DataFrame({
            #     "管道ID": top_10_pipes.index,
            #     "特征相似值": top_10_pipes.values
            # })
            # sheet_name = str(each_candidate)
            # # --- write this candidate's sub-sheet ---
            # df_to_export.to_excel(
            #     writer,
            #     sheet_name=sheet_name,
            #     index=False,
            #     header=True
            # )
            # print(f"所有候选点的相似度排名已保存")
            #
            # print(
            #     'finish candidate:' + each_candidate + ' and total calculation times:' + str(
            #         DEMAND_COUNT) + '..')
            #
            # # r1.to_excel(writer, sheet_name=each_candidate)
            #
            # for sensor_level in sensor_level_list:
            #     sensor_name = sensor_name_list_str[sensor_level]
            #     # fetch all rows related to this sensor layout from r1
            #     sensor_data = r1.loc[sensor_name]
            #     r2.loc[(sensor_name, each_candidate)] = sensor_data['TD']
            #     r3.loc[(sensor_name, each_candidate)] = sensor_data['SD']
            #     r4.loc[(sensor_name, each_candidate)] = sensor_data['PD']
            #     r5.loc[(sensor_name, each_candidate)] = sensor_data['lpID']
            #     r6.loc[(sensor_name, each_candidate)] = sensor_data['Times']
            #     r7.loc[(sensor_name, each_candidate)] = sensor_data['Cover']
            #
            # DEMAND_COUNT += 1
            # print('finish' + str(DEMAND_COUNT) + 'candidates.........')
            # if DEMAND_COUNT > 1:
            #
            #     for sensor_level in sensor_level_list:
            #         print('sensor_level_' + str(sensor_level))
            #         print('--SD: ' +
            #               str(r3.loc[sensor_name_list_str[sensor_level]].loc[recorded_position].mean()) +
            #               ', nowTD:' + str(r2.loc[sensor_name_list_str[sensor_level]].loc[each_candidate].mean()) +
            #               ', PD:' + str(r3.loc[sensor_name_list_str[sensor_level]].loc[recorded_position].mean()) +
            #               ', Cover:' + str(r7.loc[sensor_name_list_str[sensor_level]].loc[recorded_position].mean()))
            #
            #     print('====================================================')
            #
            #
            # # the saved workbook holds the evaluation metrics, mainly TD and Cover
            # r2.to_excel(writer, sheet_name='TD')
            # r3.to_excel(writer, sheet_name='SD')
            # r4.to_excel(writer, sheet_name='PD')
            # r5.to_excel(writer, sheet_name='lpID')
            # r6.to_excel(writer, sheet_name='Times')
            # r7.to_excel(writer, sheet_name='Cover')
            # writer._save()
if __name__ == '__main__':
    # Pull the burst-moment SCADA pressures (simulated through the burst scheme)
    # together with the layout of pressure sensor IDs and the smoothed "normal"
    # pressures.
    burst_SCADA_pressure, pressure_SCADA_IDs_list, normal_SCADA_pressure = manually_get_burst_time_SCADA_pressure(
        name='bb', manually_burst_time='2025-03-30T12:00:00+08:00',
        scheme_Type='burst_Analysis', scheme_Name='test0618')
    print(type(pressure_SCADA_IDs_list), pressure_SCADA_IDs_list)
    # The live SCADA feed is unreliable, so replace the "normal" pressures with
    # the model-simulated node pressures at the same timestamp.
    flat_ids = [sensor_id for id_group in pressure_SCADA_IDs_list for sensor_id in id_group]
    result_records = influxdb_api.query_all_record_by_time_property(
        query_time='2025-03-30T12:00:00+08:00', type='node', property='pressure')
    matched_records = [record for record in result_records if record['ID'] in flat_ids]
    normal_SCADA_pressure = {record['ID']: record['value'] for record in matched_records}
    print(normal_SCADA_pressure)
    # Align the simulated pressures with the burst-pressure index.
    normal_SCADA_pressure = pd.Series(normal_SCADA_pressure).reindex(burst_SCADA_pressure.index)
    # No flow sensors are used in this run; pass empty layouts/series.
    flow_SCADA_ID_list = [[]]
    burst_SCADA_flow = pd.Series(index=[], dtype=float)
    normal_SCADA_flow = pd.Series(index=[], dtype=float)
    # Detect whether a burst is present, then localize it on the network model.
    if_detect, tag = cal_if_detect(burst_SCADA_pressure=burst_SCADA_pressure,
                                   normal_SCADA_pressure=normal_SCADA_pressure, min_dpressure=2.0)
    locate_burst(name='bb', pressure_SCADA_ID_list=pressure_SCADA_IDs_list,
                 burst_SCADA_pressure=burst_SCADA_pressure,
                 normal_SCADA_pressure=normal_SCADA_pressure,
                 flow_SCADA_ID_list=flow_SCADA_ID_list,
                 burst_SCADA_flow=burst_SCADA_flow,
                 normal_SCADA_flow=normal_SCADA_flow,
                 burst_leakage=0.057, burst_happen_time='2025-03-30T12:00:00+08:00')