重构现代化 FastAPI 后端项目框架

This commit is contained in:
2026-01-21 16:50:57 +08:00
parent 9e06e68a15
commit c56f2fd1db
352 changed files with 176 additions and 70 deletions

View File

View File

@@ -0,0 +1,289 @@
# ...existing code...
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pykalman import KalmanFilter
import os
def clean_flow_data_kf(input_csv_path: str, show_plot: bool = False) -> str:
    """
    Smooth every column of the time series in ``input_csv_path`` with a 1-D
    Kalman filter and replace outliers (detected with the IQR rule on the
    residuals) by the Kalman predictions.

    The result is written as ``<input_filename>_cleaned.xlsx`` next to the
    input file.  For every column ``col`` an extra ``<col>_cleaned`` column is
    added containing the fully repaired series.

    :param input_csv_path: path of the CSV file to clean (one column per sensor).
    :param show_plot: if True, plot the first sensor with anomalies marked.
    :return: absolute path of the written Excel file.
    """
    # Load the CSV.
    data = pd.read_csv(input_csv_path, header=0, index_col=None, encoding="utf-8")
    # Kalman-smoothed estimate of every column.
    data_kf = pd.DataFrame(index=data.index, columns=data.columns)
    for col in data.columns:
        observations = pd.Series(data[col].values).ffill().bfill()
        if observations.isna().any():
            # Column still NaN after ffill/bfill (all-NaN) -> fall back to mean.
            observations = observations.fillna(observations.mean())
        obs = observations.values.astype(float)
        kf = KalmanFilter(
            transition_matrices=[1],
            observation_matrices=[1],
            initial_state_mean=float(obs[0]),
            initial_state_covariance=1,
            observation_covariance=1,
            transition_covariance=0.01,
        )
        # Fixed filter parameters (no EM learning) for performance.
        state_means, _ = kf.smooth(obs)
        data_kf[col] = state_means.flatten()
    # Residuals between observation and smoothed estimate; anomalies are
    # detected with the IQR rule (more robust than a 3-sigma cut).
    residuals = data - data_kf
    residual_thresholds = {}
    for col in data.columns:
        res_values = residuals[col].dropna().values  # drop NaN before the IQR
        q1 = np.percentile(res_values, 25)
        q3 = np.percentile(res_values, 75)
        iqr = q3 - q1
        residual_thresholds[col] = (q1 - 1.5 * iqr, q3 + 1.5 * iqr)
    cleaned_data = data.copy()
    anomalies_info = {}
    for col in data.columns:
        lower, upper = residual_thresholds[col]
        sensor_residuals = residuals[col]
        anomaly_mask = (sensor_residuals < lower) | (sensor_residuals > upper)
        anomaly_idx = data.index[anomaly_mask.fillna(False)]
        anomalies_info[col] = pd.DataFrame(
            {
                "Observed": data.loc[anomaly_idx, col],
                "Kalman_Predicted": data_kf.loc[anomaly_idx, col],
                "Residual": sensor_residuals.loc[anomaly_idx],
            }
        )
        # Bug fix: the "<col>_cleaned" column used to be written only at the
        # anomaly rows, leaving NaN everywhere else.  Start from a full copy
        # of the original column, then overwrite just the anomalies.
        cleaned_data[f"{col}_cleaned"] = data[col]
        cleaned_data.loc[anomaly_idx, f"{col}_cleaned"] = data_kf.loc[anomaly_idx, col]
    # Output name: input basename plus the _cleaned.xlsx suffix.
    input_dir = os.path.dirname(os.path.abspath(input_csv_path))
    input_base = os.path.splitext(os.path.basename(input_csv_path))[0]
    output_filename = f"{input_base}_cleaned.xlsx"
    output_path = os.path.join(input_dir, output_filename)
    # Overwrite an existing file of the same name.
    if os.path.exists(output_path):
        os.remove(output_path)
    cleaned_data.to_excel(output_path, index=False)
    # Optional visualization of the first sensor.
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams["axes.unicode_minus"] = False
    if show_plot and len(data.columns) > 0:
        sensor_to_plot = data.columns[0]
        plt.figure(figsize=(12, 6))
        plt.plot(
            data.index,
            data[sensor_to_plot],
            label="监测值",
            marker="o",
            markersize=3,
            alpha=0.7,
        )
        plt.plot(
            data.index, data_kf[sensor_to_plot], label="Kalman滤波预测值", linewidth=2
        )
        anomaly_idx = anomalies_info[sensor_to_plot].index
        if len(anomaly_idx) > 0:
            plt.plot(
                anomaly_idx,
                data[sensor_to_plot].loc[anomaly_idx],
                "ro",
                markersize=8,
                label="监测值异常点",
            )
            plt.plot(
                anomaly_idx,
                data_kf[sensor_to_plot].loc[anomaly_idx],
                "go",
                markersize=8,
                label="Kalman修复值",
            )
        plt.xlabel("时间点(序号)")
        plt.ylabel("监测值")
        plt.title(f"{sensor_to_plot}观测值与Kalman滤波预测值异常点标记")
        plt.legend()
        plt.show()
    # Absolute path of the written workbook.
    return os.path.abspath(output_path)
def clean_flow_data_df_kf(data: pd.DataFrame, show_plot: bool = False) -> pd.DataFrame:
    """
    Smooth every column of ``data`` with a 1-D Kalman filter and replace
    outliers (detected with the IQR rule on the residuals) by the Kalman
    predictions.  Zero flow values are treated as missing and interpolated
    before filtering, to separate plausible zero flows from faulty readings.

    :param data: DataFrame of flow time series; a dict of column -> list is
        also accepted and coerced to a DataFrame.
    :param show_plot: if True, plot original and cleaned series of the first column.
    :return: cleaned DataFrame (same shape and column names as the input).
    """
    # Bug fix: coerce to DataFrame (and copy) so dict inputs — as used by the
    # test driver below — no longer crash on DataFrame-only methods, and the
    # caller's object is never mutated.  The return annotation is corrected
    # from `dict` to `pd.DataFrame` to match what is actually returned.
    data = pd.DataFrame(data).copy()
    # Treat zeros as missing values.
    data_filled = data.replace(0, np.nan)
    # Interpolate the gaps linearly, then ffill/bfill whatever remains.
    data_filled = data_filled.interpolate(method="linear", limit_direction="both")
    data_filled = data_filled.ffill().bfill()
    # Kalman-smoothed estimate of every column.
    data_kf = pd.DataFrame(index=data_filled.index, columns=data_filled.columns)
    for col in data_filled.columns:
        observations = pd.Series(data_filled[col].values).ffill().bfill()
        if observations.isna().any():
            # Column still NaN after ffill/bfill -> fall back to mean.
            observations = observations.fillna(observations.mean())
        obs = observations.values.astype(float)
        kf = KalmanFilter(
            transition_matrices=[1],
            observation_matrices=[1],
            initial_state_mean=float(obs[0]),
            initial_state_covariance=1,
            observation_covariance=10,
            transition_covariance=10,
        )
        state_means, _ = kf.smooth(obs)
        data_kf[col] = state_means.flatten()
    # Residuals and IQR thresholds per column.
    residuals = data_filled - data_kf
    residual_thresholds = {}
    for col in data_filled.columns:
        res_values = residuals[col].dropna().values
        q1 = np.percentile(res_values, 25)
        q3 = np.percentile(res_values, 75)
        iqr = q3 - q1
        residual_thresholds[col] = (q1 - 1.5 * iqr, q3 + 1.5 * iqr)
    # Build the repaired frame.
    cleaned_data = data_filled.copy()
    anomalies_info = {}
    for col in data_filled.columns:
        lower, upper = residual_thresholds[col]
        sensor_residuals = residuals[col]
        anomaly_mask = (sensor_residuals < lower) | (sensor_residuals > upper)
        anomaly_idx = data_filled.index[anomaly_mask.fillna(False)]
        anomalies_info[col] = pd.DataFrame(
            {
                "Observed": data_filled.loc[anomaly_idx, col],
                "Kalman_Predicted": data_kf.loc[anomaly_idx, col],
                "Residual": sensor_residuals.loc[anomaly_idx],
            }
        )
        # Replace the anomalies in place with the Kalman predictions.
        cleaned_data.loc[anomaly_idx, col] = data_kf.loc[anomaly_idx, col]
    # Optional visualization.
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams["axes.unicode_minus"] = False
    if show_plot and len(data.columns) > 0:
        sensor_to_plot = data.columns[0]
        plt.figure(figsize=(12, 8))
        plt.subplot(2, 1, 1)
        plt.plot(
            data.index,
            data[sensor_to_plot],
            label="原始监测值",
            marker="o",
            markersize=3,
            alpha=0.7,
        )
        abnormal_zero_idx = data.index[data_filled[sensor_to_plot].isna()]
        if len(abnormal_zero_idx) > 0:
            plt.plot(
                abnormal_zero_idx,
                data[sensor_to_plot].loc[abnormal_zero_idx],
                "mo",
                markersize=8,
                label="异常0值",
            )
        plt.plot(
            data.index, data_kf[sensor_to_plot], label="Kalman滤波预测值", linewidth=2
        )
        anomaly_idx = anomalies_info[sensor_to_plot].index
        if len(anomaly_idx) > 0:
            plt.plot(
                anomaly_idx,
                data_filled[sensor_to_plot].loc[anomaly_idx],
                "ro",
                markersize=8,
                label="IQR异常点",
            )
        plt.xlabel("时间点(序号)")
        plt.ylabel("流量值")
        plt.title(f"{sensor_to_plot}:原始数据与异常检测")
        plt.legend()
        plt.subplot(2, 1, 2)
        plt.plot(
            data.index,
            cleaned_data[sensor_to_plot],
            label="修复后监测值",
            marker="o",
            markersize=3,
            color="green",
        )
        plt.xlabel("时间点(序号)")
        plt.ylabel("流量值")
        plt.title(f"{sensor_to_plot}:修复后数据")
        plt.legend()
        plt.tight_layout()
        plt.show()
    # Repaired DataFrame.
    return cleaned_data
# # Test clean_flow_data_kf
# if __name__ == "__main__":
#     # Default: same-named CSV next to this script
#     script_dir = os.path.dirname(os.path.abspath(__file__))
#     default_csv = os.path.join(script_dir, "pipe_flow_data_to_clean2.0.csv")
#     out = clean_flow_data_kf(default_csv)
#     print("清洗后的数据已保存到:", out)
# Test the clean_flow_data_df_kf function
if __name__ == "__main__":
    import random
    # Load szh_flow_scada.csv from the script directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    csv_path = os.path.join(script_dir, "szh_flow_scada.csv")
    data = pd.read_csv(csv_path, header=0, index_col=None, encoding="utf-8")
    # Exclude the Time column and pick one data column at random
    # (the old comment claimed 5 columns; only 1 is sampled).
    columns_to_exclude = ["Time"]
    available_columns = [col for col in data.columns if col not in columns_to_exclude]
    selected_columns = random.sample(available_columns, 1)
    # Bug fix: build a DataFrame — clean_flow_data_df_kf operates on DataFrame
    # methods, and the previous plain dict crashed inside the function.
    data_df = pd.DataFrame({col: data[col].tolist() for col in selected_columns})
    print("选中的列:", selected_columns)
    print("原始数据长度:", len(data_df[selected_columns[0]]))
    # Run the cleaning.
    cleaned_df = clean_flow_data_df_kf(data_df, show_plot=True)
    # Persist the cleaned column(s) to CSV.
    out_csv = os.path.join(script_dir, f"{selected_columns[0]}_clean.csv")
    pd.DataFrame(cleaned_df).to_csv(out_csv, index=False, encoding="utf-8-sig")
    print("已保存清洗结果到:", out_csv)
    print("清洗后的字典键:", list(cleaned_df.keys()))
    print("清洗后的数据长度:", len(cleaned_df[selected_columns[0]]))
    print("测试完成:函数运行正常")

View File

@@ -0,0 +1,238 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.impute import SimpleImputer
import os
def clean_pressure_data_km(input_csv_path: str, show_plot: bool = False) -> str:
    """
    Detect anomalies in a pressure CSV with KMeans clustering and repair them
    with a centered rolling mean.

    Output is written to ``<input_basename>_cleaned.xlsx`` in the same
    directory: raw data in sheet 'raw_pressure_data', repaired data in sheet
    'cleaned_pressusre_data' (NOTE: sheet name intentionally kept misspelled
    for backward compatibility with existing consumers).

    :param input_csv_path: path of the CSV file, one column per sensor.
    :param show_plot: if True, plot raw and repaired series with anomalies marked.
    :return: absolute path of the written Excel file.
    """
    # Load the CSV.
    input_csv_path = os.path.abspath(input_csv_path)
    data = pd.read_csv(input_csv_path, header=0, index_col=None, encoding="utf-8")
    # Z-score normalization per column.
    data_norm = (data - data.mean()) / data.std()
    # Robustness fix: a constant column has std == 0 and produces NaN, which
    # makes KMeans raise.  Neutralize such columns before clustering (this is
    # also what the DataFrame variant below does via SimpleImputer).
    data_norm = data_norm.fillna(0.0)
    # Cluster the rows; rows far from their cluster center are anomalies.
    k = 3
    kmeans = KMeans(n_clusters=k, init="k-means++", n_init=50, random_state=42)
    clusters = kmeans.fit_predict(data_norm)
    centers = kmeans.cluster_centers_
    distances = np.linalg.norm(data_norm.values - centers[clusters], axis=1)
    threshold = distances.mean() + 3 * distances.std()
    anomaly_pos = np.where(distances > threshold)[0]
    # For each anomalous row, record the sensor deviating most from the center.
    anomaly_details = {}
    for pos in anomaly_pos:
        row_norm = data_norm.iloc[pos]
        cluster_idx = clusters[pos]
        center = centers[cluster_idx]
        diff = abs(row_norm - center)
        main_sensor = diff.idxmax()
        anomaly_details[data.index[pos]] = main_sensor
    # Repair: centered rolling mean (window size is tunable).
    data_rolled = data.rolling(window=13, center=True, min_periods=1).mean()
    data_repaired = data.copy()
    for pos in anomaly_pos:
        label = data.index[pos]
        sensor = anomaly_details[label]
        data_repaired.loc[label, sensor] = data_rolled.loc[label, sensor]
    # Optional visualization (row position used as the x axis).
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams["axes.unicode_minus"] = False
    if show_plot and len(data.columns) > 0:
        n = len(data)
        time = np.arange(n)
        plt.figure(figsize=(12, 8))
        for col in data.columns:
            plt.plot(time, data[col].values, marker="o", markersize=3, label=col)
        for pos in anomaly_pos:
            sensor = anomaly_details[data.index[pos]]
            plt.plot(pos, data.iloc[pos][sensor], "ro", markersize=8)
        plt.xlabel("时间点(序号)")
        plt.ylabel("压力监测值")
        plt.title("各传感器折线图(红色标记主要异常点)")
        plt.legend()
        plt.show()
        plt.figure(figsize=(12, 8))
        for col in data_repaired.columns:
            plt.plot(
                time, data_repaired[col].values, marker="o", markersize=3, label=col
            )
        for pos in anomaly_pos:
            sensor = anomaly_details[data.index[pos]]
            plt.plot(pos, data_repaired.iloc[pos][sensor], "go", markersize=8)
        plt.xlabel("时间点(序号)")
        plt.ylabel("修复后压力监测值")
        plt.title("修复后各传感器折线图(绿色标记修复值)")
        plt.legend()
        plt.show()
    # Save both sheets to Excel.
    input_dir = os.path.dirname(os.path.abspath(input_csv_path))
    input_base = os.path.splitext(os.path.basename(input_csv_path))[0]
    output_filename = f"{input_base}_cleaned.xlsx"
    output_path = os.path.join(input_dir, output_filename)
    if os.path.exists(output_path):
        os.remove(output_path)  # overwrite an existing file of the same name
    with pd.ExcelWriter(output_path, engine="openpyxl") as writer:
        data.to_excel(writer, sheet_name="raw_pressure_data", index=False)
        data_repaired.to_excel(writer, sheet_name="cleaned_pressusre_data", index=False)
    # Absolute path of the written workbook.
    return os.path.abspath(output_path)
def clean_pressure_data_df_km(data: pd.DataFrame, show_plot: bool = False) -> pd.DataFrame:
    """
    Detect anomalies with KMeans clustering and repair them with a centered
    rolling mean, operating on an in-memory DataFrame.

    :param data: DataFrame of pressure time series; a dict of column -> list
        is also accepted and coerced to a DataFrame.
    :param show_plot: if True, plot raw/filled and repaired series.
    :return: repaired DataFrame (same shape and column names as the input).
    """
    # Bug fix: coerce to DataFrame (and copy) so dict inputs — as used by the
    # test driver below — no longer crash on DataFrame-only methods.  The
    # return annotation is corrected from `dict` to `pd.DataFrame` to match
    # what is actually returned.
    data = pd.DataFrame(data).copy()
    # Fill NaN values first.
    data = data.ffill().bfill()
    # Treat zeros as missing and interpolate them linearly.
    data_filled = data.replace(0, np.nan)
    data_filled = data_filled.interpolate(method="linear", limit_direction="both")
    # Columns that were entirely zero still need ffill/bfill.
    data_filled = data_filled.ffill().bfill()
    # Z-score normalization of the filled data.
    data_norm = (data_filled - data_filled.mean()) / data_filled.std()
    # Guard against NaN after normalization (e.g. zero-std columns), which
    # would otherwise make KMeans raise on constant data windows.
    imputer = SimpleImputer(
        strategy="constant", fill_value=0, keep_empty_features=True
    )  # fill NaN (including all-NaN columns) with 0 and keep empty features
    data_norm = pd.DataFrame(
        imputer.fit_transform(data_norm),
        columns=data_norm.columns,
        index=data_norm.index,
    )
    # Cluster the rows; rows far from their cluster center are anomalies.
    k = 3
    kmeans = KMeans(n_clusters=k, init="k-means++", n_init=50, random_state=42)
    clusters = kmeans.fit_predict(data_norm)
    centers = kmeans.cluster_centers_
    distances = np.linalg.norm(data_norm.values - centers[clusters], axis=1)
    threshold = distances.mean() + 3 * distances.std()
    anomaly_pos = np.where(distances > threshold)[0]
    # For each anomalous row, record the sensor deviating most from the center.
    anomaly_details = {}
    for pos in anomaly_pos:
        row_norm = data_norm.iloc[pos]
        cluster_idx = clusters[pos]
        center = centers[cluster_idx]
        diff = abs(row_norm - center)
        main_sensor = diff.idxmax()
        anomaly_details[data.index[pos]] = main_sensor
    # Repair: centered rolling mean (window size is tunable).
    data_rolled = data_filled.rolling(window=13, center=True, min_periods=1).mean()
    data_repaired = data_filled.copy()
    for pos in anomaly_pos:
        label = data.index[pos]
        sensor = anomaly_details[label]
        data_repaired.loc[label, sensor] = data_rolled.loc[label, sensor]
    # Optional visualization (row position used as the x axis).
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams["axes.unicode_minus"] = False
    if show_plot and len(data.columns) > 0:
        n = len(data)
        time = np.arange(n)
        plt.figure(figsize=(12, 8))
        for col in data.columns:
            plt.plot(
                time, data[col].values, marker="o", markersize=3, label=col, alpha=0.5
            )
        for col in data_filled.columns:
            plt.plot(
                time,
                data_filled[col].values,
                marker="x",
                markersize=3,
                label=f"{col}_filled",
                linestyle="--",
            )
        for pos in anomaly_pos:
            sensor = anomaly_details[data.index[pos]]
            plt.plot(pos, data_filled.iloc[pos][sensor], "ro", markersize=8)
        plt.xlabel("时间点(序号)")
        plt.ylabel("压力监测值")
        plt.title("各传感器折线图红色标记主要异常点虚线为0值填充后")
        plt.legend()
        plt.show()
        plt.figure(figsize=(12, 8))
        for col in data_repaired.columns:
            plt.plot(
                time, data_repaired[col].values, marker="o", markersize=3, label=col
            )
        for pos in anomaly_pos:
            sensor = anomaly_details[data.index[pos]]
            plt.plot(pos, data_repaired.iloc[pos][sensor], "go", markersize=8)
        plt.xlabel("时间点(序号)")
        plt.ylabel("修复后压力监测值")
        plt.title("修复后各传感器折线图(绿色标记修复值)")
        plt.legend()
        plt.show()
    # Repaired DataFrame.
    return data_repaired
# Test clean_pressure_data_km
# if __name__ == "__main__":
#     # Default: pressure_raw_data.csv next to this script
#     script_dir = os.path.dirname(os.path.abspath(__file__))
#     default_csv = os.path.join(script_dir, "pressure_raw_data.csv")
#     out_path = clean_pressure_data_km(default_csv, show_plot=False)
#     print("保存路径:", out_path)
# Test the clean_pressure_data_df_km function
if __name__ == "__main__":
    import random
    # Load szh_pressure_scada.csv from the script directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    csv_path = os.path.join(script_dir, "szh_pressure_scada.csv")
    data = pd.read_csv(csv_path, header=0, index_col=None, encoding="utf-8")
    # Exclude the Time column and pick 5 data columns at random.
    columns_to_exclude = ["Time"]
    available_columns = [col for col in data.columns if col not in columns_to_exclude]
    selected_columns = random.sample(available_columns, 5)
    # Bug fix: build a DataFrame — clean_pressure_data_df_km operates on
    # DataFrame methods, and the previous plain dict crashed inside it.
    data_df = pd.DataFrame({col: data[col].tolist() for col in selected_columns})
    print("选中的列:", selected_columns)
    print("原始数据长度:", len(data_df[selected_columns[0]]))
    # Run the cleaning.
    cleaned_df = clean_pressure_data_df_km(data_df, show_plot=True)
    print("清洗后的字典键:", list(cleaned_df.keys()))
    print("清洗后的数据长度:", len(cleaned_df[selected_columns[0]]))
    print("测试完成:函数运行正常")

View File

@@ -0,0 +1,3 @@
from .Fdataclean import *
from .Pdataclean import *
from .pipeline_health_analyzer import *

View File

@@ -0,0 +1,109 @@
import wntr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cluster
import os
class QD_KMeans(object):
    """K-means based selection of pressure-monitoring junctions from their
    planar coordinates: cluster the junctions, then pick the junction nearest
    to each cluster center as a monitoring point."""

    def __init__(self, wn, num_monitors):
        """
        :param wn: WNTR water network model.
        :param num_monitors: number of clusters == number of monitoring points.
        """
        self.cluster_num = num_monitors  # number of pressure-monitoring points
        self.wn = wn
        self.monitor_nodes = []      # selected junction names
        self.coords = []             # (x, y) of every junction, in list order
        self.junction_nodes = {}     # junction name -> coordinates

    def get_junctions_coordinates(self):
        """Collect the coordinates of every junction in the network."""
        for junction_name in self.wn.junction_name_list:
            junction = self.wn.get_node(junction_name)
            self.junction_nodes[junction_name] = junction.coordinates
            self.coords.append(junction.coordinates)

    def select_monitoring_points(self):
        """Cluster min-max-normalized coordinates and return, for each cluster
        center, the name of the nearest junction."""
        if not self.coords:  # lazily collect coordinates on first use
            self.get_junctions_coordinates()
        coords = np.array(self.coords)
        # Robustness fix: a degenerate axis (all junctions share the same x
        # or y) made the original normalization divide by zero.
        span = coords.max(axis=0) - coords.min(axis=0)
        span = np.where(span == 0, 1.0, span)
        coords_normalized = (coords - coords.min(axis=0)) / span
        kmeans = sklearn.cluster.KMeans(n_clusters=self.cluster_num, random_state=42)
        kmeans.fit(coords_normalized)
        for center in kmeans.cluster_centers_:
            # Squared Euclidean distance is enough for the argmin.
            distances = np.sum((coords_normalized - center) ** 2, axis=1)
            nearest_node = self.wn.junction_name_list[np.argmin(distances)]
            self.monitor_nodes.append(nearest_node)
        return self.monitor_nodes

    def visualize_network(self):
        """Visualize the network with the monitoring points highlighted."""
        wntr.graphics.plot_network(self.wn,
                                   node_attribute=self.monitor_nodes,
                                   node_size=30,
                                   title='Optimal sensor')
        plt.show()
def kmeans_sensor_placement(name: str, sensor_num: int, min_diameter: int) -> list:
    """Return `sensor_num` monitoring-junction IDs for the network `name`.

    Loads ./db_inp/<name>.db.inp and delegates to QD_KMeans.
    NOTE(review): `min_diameter` is currently unused here; it is kept to
    preserve the call signature.
    """
    model_path = f'./db_inp/{name}.db.inp'
    network = wntr.network.WaterNetworkModel(model_path)
    clusterer = QD_KMeans(network, sensor_num)
    # Monitoring-point selection (visualization intentionally not invoked).
    return clusterer.select_monitoring_points()
if __name__ == "__main__":
    # Example run: 50 sensors on the 'szh' network.
    selected = kmeans_sensor_placement(name='szh', sensor_num=50, min_diameter=300)
    print(selected)

View File

@@ -0,0 +1,142 @@
import os
import joblib
import pandas as pd
import matplotlib.pyplot as plt
class PipelineHealthAnalyzer:
    """
    Pipeline health analyzer backed by a pre-trained random survival forest.

    Encapsulates model loading and prediction so it can be reused from other
    projects.  Predictions are driven by 4 features: Material, Diameter,
    Flow Velocity and Pressure.
    Dependencies: joblib, pandas, numpy, scikit-survival, matplotlib.
    """

    def __init__(self, model_path: str = "model/my_survival_forest_model_quxi.joblib"):
        """
        Load the pre-trained random survival forest.

        :param model_path: path of the serialized model (defaults to the
            relative path 'model/my_survival_forest_model_quxi.joblib').
        :raises FileNotFoundError: if the model file does not exist.
        :raises Exception: if deserializing the model fails.
        """
        # Make sure the model directory exists (helps first-time setups drop
        # the model file in the expected place).
        model_dir = os.path.dirname(model_path)
        if model_dir and not os.path.exists(model_dir):
            os.makedirs(model_dir, exist_ok=True)
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件未找到: {model_path}")
        try:
            self.rsf = joblib.load(model_path)
            # Feature columns the model was trained on, in order.
            self.features = [
                "Material",
                "Diameter",
                "Flow Velocity",
                "Pressure",  # 'Temperature', 'Precipitation',
                # 'Location', 'Structural Defects', 'Functional Defects'
            ]
        except Exception as e:
            # Bug fix: chain the original exception so the traceback of the
            # underlying load failure is preserved.
            raise Exception(f"加载模型时出错: {str(e)}") from e

    def predict_survival(self, data: pd.DataFrame) -> list:
        """
        Predict survival functions for the rows of ``data``.

        :param data: DataFrame containing the 4 required feature columns with
            numeric (or numeric-convertible) values; extra columns are ignored.
        :return: list of survival-function objects, each exposing time points
            ``x`` and survival probabilities ``y``.
        :raises ValueError: if required features are missing or non-numeric.
        """
        # Validate that every required feature is present.
        missing_features = [feat for feat in self.features if feat not in data.columns]
        if missing_features:
            raise ValueError(f"数据缺少必需特征: {missing_features}")
        # Extract and coerce the feature columns.
        try:
            x_test = data[self.features].astype(float)  # enforce numeric dtype
        except ValueError as e:
            # Chain so the offending column/value stays visible.
            raise ValueError(f"特征数据转换失败,请检查数据类型: {str(e)}") from e
        # Run the model.
        survival_functions = self.rsf.predict_survival_function(x_test)
        return list(survival_functions)

    def plot_survival(
        self, survival_functions: list, save_path: str = None, show_plot: bool = True
    ):
        """
        Plot the predicted survival probability curves.

        :param survival_functions: list returned by :meth:`predict_survival`.
        :param save_path: optional .png path; if None, nothing is saved.
        :param show_plot: whether to display the figure interactively; when
            False the figure is closed to free resources.
        """
        plt.figure(figsize=(10, 6))
        for i, sf in enumerate(survival_functions):
            plt.step(sf.x, sf.y, where="post", label=f"样本 {i + 1}")
        plt.xlabel("时间(年)")
        plt.ylabel("生存概率")
        plt.title("管道生存概率预测")
        plt.legend()
        plt.grid(True, alpha=0.3)
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches="tight")
            print(f"图表已保存到: {save_path}")
        if show_plot:
            plt.show()
        else:
            plt.close()
# Usage notes (module-level string; no runtime effect)
"""
Using PipelineHealthAnalyzer from another project:
1. Install the dependencies (add to requirements.txt):
   joblib==1.5.0
   pandas==2.2.3
   numpy==2.0.2
   scikit-survival==0.23.1
   matplotlib==3.9.4
2. Import the class:
   from pipeline_health_analyzer import PipelineHealthAnalyzer
3. Initialize the analyzer (point at your actual model file):
   analyzer = PipelineHealthAnalyzer(model_path='path/to/my_survival_forest_model_quxi.joblib')
4. Prepare the data: a pandas DataFrame with the 4 required feature columns
   (the class only reads these; extra columns are ignored):
   import pandas as pd
   data = pd.DataFrame({
       'Material': [1, 2],          # example values
       'Diameter': [100, 150],
       'Flow Velocity': [1.5, 2.0],
       'Pressure': [50, 60],
   })
5. Predict:
   survival_funcs = analyzer.predict_survival(data)
6. Inspect the results (survival probability over time per sample):
   for i, sf in enumerate(survival_funcs):
       print(f"样本 {i+1}: 时间点: {sf.x[:5]}..., 生存概率: {sf.y[:5]}...")
7. Visualize (optional):
   analyzer.plot_survival(survival_funcs, save_path='survival_plot.png')
Notes:
- Feature values must be numeric and the column names must match the feature list.
- Copy the model file from the original project or retrain it.
- To customize features or model parameters, edit the `features` list or subclass.
"""

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,654 @@
# 改进灵敏度法
import networkx
import numpy as np
import pandas
import wntr
import pandas as pd
import copy
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.cluster import KMeans
from wntr.epanet.toolkit import EpanetException
from numpy.linalg import slogdet
import random
from tjnetwork import *
from matplotlib.lines import Line2D
from sklearn.cluster import SpectralClustering
import libpysal as ps
from spopt.region import Skater
from shapely.geometry import Point
import geopandas as gpd
from sklearn.metrics import pairwise_distances
import project_info
# 2025/03/12
# Step1: node coordinates
def getCoor(wn: wntr.network.WaterNetworkModel) -> pandas.DataFrame:
    """
    Collect the planar coordinates of every node of a water network model.

    :param wn: WNTR water network model.
    :return: DataFrame indexed by node name with columns ['x', 'y'].
    """
    # Series: index = node names, values = coordinate tuples such as (x, y).
    site = wn.query_node_attribute('coordinates')
    # Idiom fix: build the frame directly from the coordinate tuples instead
    # of the original pair of manual index loops.  Only the first two
    # components are kept, matching the original behavior.
    xy = [(coord[0], coord[1]) for coord in site.values]
    return pd.DataFrame(xy, index=wn.node_name_list, columns=['x', 'y'])
# 2025/03/12
# Step2: KMeans clustering
# Partition the nodes into k groups by coordinates and collect them in a dict.
def kgroup(coor: pandas.DataFrame, knum: int) -> dict[int, list[str]]:
    """
    Group the nodes with KMeans on their coordinates.

    :param coor: coordinates of all nodes (index = node names).
    :param knum: number of clusters to produce.
    :return: dict mapping cluster id -> list of node names in that cluster.
    """
    estimator = KMeans(n_clusters=knum)
    estimator.fit(coor)
    # Per-node cluster labels assigned by the fitted model.
    labels = estimator.labels_
    return {cid: coor[labels == cid].index.tolist() for cid in range(knum)}
def skater_partition(G, n_clusters):
    """
    Partition the undirected graph G into spatially contiguous regions with
    the SKATER algorithm.  Every region is connected in the graph-theoretic
    sense, and the split is driven by the node coordinates stored in the
    'pos' node attribute.

    :param G: networkx.Graph whose nodes carry a 'pos' coordinate attribute.
    :param n_clusters: number of regions to produce.
    :return: dict mapping region label -> list of member nodes.
    """
    # 1. Node coordinates, one row [x, y] per node (every node needs 'pos').
    positions = nx.get_node_attributes(G, 'pos')
    node_list = list(G.nodes())
    coord_array = np.array([positions[node] for node in node_list])
    # 2. GeoDataFrame with a shapely Point geometry per node.
    frame = pd.DataFrame(coord_array, columns=['x', 'y'], index=node_list)
    frame['geometry'] = frame.apply(lambda row: Point(row['x'], row['y']), axis=1)
    geo_frame = gpd.GeoDataFrame(frame, geometry='geometry')
    # 3. Row-standardized 4-nearest-neighbour spatial weights (k is tunable).
    weights = ps.weights.KNN.from_array(coord_array, k=4)
    weights.transform = 'R'
    # 4. Run SKATER (new API: gdf, weights and attrs_name, here 'x'/'y').
    model = Skater(geo_frame, weights, attrs_name=['x', 'y'], n_clusters=n_clusters)
    model.solve()
    # 5. Collect the labels into the dict result format.
    regions = {}
    for region_label, node in zip(model.labels_, node_list):
        regions.setdefault(region_label, []).append(node)
    return regions
def spectral_partition(G, n_clusters):
    """
    Partition graph G with spectral clustering on the node coordinates:
      1. pairwise Euclidean distances of the 'pos' coordinates;
      2. Gaussian-kernel affinity A(i,j) = exp(-d(i,j)^2 / (2*sigma^2)),
         with sigma taken as the mean pairwise distance;
      3. normalized-cut SpectralClustering on the precomputed affinity.

    :param G: networkx.Graph whose nodes carry a 'pos' (x, y) attribute.
    :param n_clusters: number of clusters to produce.
    :return: dict mapping cluster label -> list of member nodes.
    """
    # 1. Node coordinates (every node needs a 'pos' attribute).
    positions = nx.get_node_attributes(G, 'pos')
    node_list = list(G.nodes())
    coord_array = np.array([positions[node] for node in node_list])
    # 2. Euclidean distance matrix between all node pairs.
    dist = pairwise_distances(coord_array, metric='euclidean')
    # 3. Kernel bandwidth: mean of all pairwise distances (tunable).
    sigma = np.mean(dist)
    # 4. Gaussian-kernel similarity matrix.
    affinity = np.exp(-(dist ** 2) / (2 * sigma ** 2))
    # 5. Spectral clustering on the precomputed affinity.
    model = SpectralClustering(n_clusters=n_clusters,
                               affinity='precomputed',
                               random_state=0)
    labels = model.fit_predict(affinity)
    # 6. Collect the labels into the dict result format.
    clusters = {}
    for cluster_label, node in zip(labels, node_list):
        clusters.setdefault(cluster_label, []).append(node)
    return clusters
# 2025/03/12
# Step3: hydraulic computations (wn_func)
# wn_func computes:
#   - hydraulic distance: path length between nodes weighted by head loss;
#   - sensitivity analysis used to optimize pressure-sensor placement.
# Helpers: CtoS (hydraulic distance), get_Conn (incidence matrices),
# Jaco (sensitivity matrix of node pressures w.r.t. pipe roughness).
class wn_func(object):
    """Hydraulic helper around a WNTR model: baseline simulation results,
    hydraulic-distance matrix, and sensitivity of node pressures to
    pipe-roughness changes."""

    # Step3.1: initialization
    def __init__(self, wn: wntr.network.WaterNetworkModel, min_diameter: int):
        """
        Extract network information and run a baseline EPANET simulation.

        :param wn: WNTR water network model.
        :param min_diameter: minimum pipe diameter eligible for installation.
        """
        # Baseline simulation results (pressure, flow, head, ...).
        self.results = wntr.sim.EpanetSimulator(wn).run_sim()
        self.wn = wn
        # Pipe flow rates: index = timestep, columns = pipe names.
        self.q = self.results.link['flowrate']
        # Reservoir / tank node names.
        ReservoirIndex = wn.reservoir_name_list
        Tankindex = wn.tank_name_list
        # All pipe and node names; virtual pipes attached to reservoirs are
        # removed from self.pipes below.
        self.pipes = wn.pipe_name_list
        self.nodes = wn.node_name_list
        # Node coordinates: Series of (x, y) tuples indexed by node name.
        self.coordinates = wn.query_node_attribute('coordinates')
        # All pump / valve names.
        allpumps = wn.pump_name_list
        allvalves = wn.valve_name_list
        # Start/end node names of pumps and valves.
        pumpstnode = []
        pumpednode = []
        valvestnode = []
        valveednode = []
        # Pipes and nodes directly connected to reservoirs or tanks.
        Reservoirpipe = []
        Reservoirednode = []
        for pump in allpumps:
            pumpstnode.append(wn.links[pump].start_node.name)
            pumpednode.append(wn.links[pump].end_node.name)
        for valve in allvalves:
            valvestnode.append(wn.links[valve].start_node.name)
            valveednode.append(wn.links[valve].end_node.name)
        for pipe in self.pipes:
            if wn.links[pipe].start_node.name in ReservoirIndex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].start_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].end_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].start_node.name)
        # Nodes to drop: reservoirs, tanks, pump/valve endpoints, and
        # reservoir-adjacent nodes.
        self.delnodes = list(
            set(ReservoirIndex).union(Tankindex, pumpstnode, pumpednode, valvestnode, valveednode, Reservoirednode))
        # Links to drop: pumps, valves, and pipes touching reservoirs/tanks.
        self.delpipes = list(set(wn.pump_name_list).union(wn.valve_name_list).union(Reservoirpipe))
        self.pipes = [pipe for pipe in wn.pipe_name_list if pipe not in self.delpipes]
        # Lengths of the retained pipes (meters).
        self.L = wn.query_link_attribute('length')[self.pipes].tolist()
        self.n = len(self.nodes)
        self.m = len(self.pipes)
        # Unit head loss at the first timestep (per link).
        self.unit_headloss = self.results.link['headloss'].iloc[0, :].tolist()
        self.delnodes1 = list(set(ReservoirIndex).union(Tankindex))
        # Junctions attached to pipes thinner than min_diameter.
        self.less_than_min_diameter_junction_list = []
        for pipe in self.pipes:
            diameter = wn.links[pipe].diameter
            if diameter < min_diameter:
                start_node = wn.links[pipe].start_node.name
                end_node = wn.links[pipe].end_node.name
                self.less_than_min_diameter_junction_list.extend([start_node, end_node])
        # De-duplicate.
        self.less_than_min_diameter_junction_list = list(set(self.less_than_min_diameter_junction_list))

    # Step3.2: hydraulic distance
    def CtoS(self):
        """
        Build the hydraulic-distance matrix.

        Entry [a, b] is the minimum over directed flow paths from a to b of
        the accumulated (head loss * pipe length); the row index is the
        candidate control node.

        :return: (hydraulicL, G) — the distance DataFrame and the directed
            graph whose edge weights are head loss * length along the flow.
        """
        nodes = copy.deepcopy(self.nodes)
        pipes = self.pipes
        wn = self.wn
        n = self.n
        m = self.m
        q = self.q
        L = self.L
        # Node heads: rows = node names, columns = timesteps.
        H1 = self.results.node['head'].T
        # Absolute head difference across each pipe (its head loss profile).
        hh = []
        for p in pipes:
            h1 = self.wn.links[p].start_node.name
            h1 = H1.loc[str(h1)]
            h2 = self.wn.links[p].end_node.name
            h2 = H1.loc[str(h2)]
            hh.append(abs(h1 - h2))
        hh = np.array(hh)
        # Head loss per pipe; row 0 corresponds to the first timestep.
        headloss = pd.DataFrame(hh, index=pipes).T
        # Directed edge weights: head loss * length, oriented by flow sign.
        # (The original also filled unused scratch matrices s1/hf; removed.)
        weightL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        G = nx.DiGraph()
        for i in range(0, m):
            pipe = pipes[i]
            a = wn.links[pipe].start_node.name
            b = wn.links[pipe].end_node.name
            if q.loc[0, pipe] > 0:
                weightL.loc[a, b] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(a, b, weightL.loc[a, b])])
            else:
                weightL.loc[b, a] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(b, a, weightL.loc[b, a])])
        # Shortest weighted path length from every source node.
        hydraulicL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        for a in nodes:
            if a in G.nodes:
                d = nx.shortest_path_length(G, source=a, weight='weight')
                for b in list(d.keys()):
                    hydraulicL.loc[a, b] = d[b]
        # Drop the excluded nodes from both axes.
        hydraulicL = hydraulicL.drop(self.delnodes)
        hydraulicL = hydraulicL.drop(self.delnodes, axis=1)
        return hydraulicL, G

    # Step3.3: connectivity matrices
    def get_Conn(self):
        """
        Build the node/link incidence matrices.

        Sets ``self.A`` (node x pipe incidence: -1 at start node, +1 at end),
        ``self.A2`` (incidence restricted to retained junctions and pipes, as
        np.matrix), ``self.A3`` (node adjacency) and ``self.junc_list``.
        """
        m = self.wn.num_links
        n = self.wn.num_nodes
        p = self.wn.num_pumps
        v = self.wn.num_valves
        self.nonjunc_index = []
        self.non_link_index = []
        for r in self.wn.reservoirs():
            self.nonjunc_index.append(r[0])
        for t in self.wn.tanks():
            self.nonjunc_index.append(t[0])
        # Node-pipe incidence: start node -1, end node +1.
        Conn = np.mat(np.zeros([n, m - p - v]))
        # Node-node adjacency: 1 where a pipe connects the pair.
        NConn = np.mat(np.zeros([n, n]))
        # Performance fix: materialize the pump/valve iterators once instead
        # of re-creating the generators for every membership test.
        pump_items = list(self.wn.pumps())
        valve_items = list(self.wn.valves())
        pipes = [pipe for pipe in self.wn.pipes() if pipe not in pump_items and pipe not in valve_items]
        for pipe_name, pipe in pipes:
            start = self.wn.node_name_list.index(pipe.start_node_name)
            end = self.wn.node_name_list.index(pipe.end_node_name)
            p_index = self.wn.link_name_list.index(pipe_name)
            Conn[start, p_index] = -1
            Conn[end, p_index] = 1
            NConn[start, end] = 1
            NConn[end, start] = 1
        self.A = Conn
        link_name_list = [link for link in self.wn.link_name_list if
                          link not in self.wn.pump_name_list and link not in self.wn.valve_name_list]
        self.A2 = pd.DataFrame(self.A, index=self.wn.node_name_list, columns=link_name_list)
        self.A2 = self.A2.drop(self.delnodes)
        for pipe in self.delpipes:
            if pipe not in self.wn.pump_name_list and pipe not in self.wn.valve_name_list:
                self.A2 = self.A2.drop(columns=pipe)
        self.junc_list = self.A2.index
        self.A2 = np.mat(self.A2)  # retained node-pipe incidence
        self.A3 = NConn

    def Jaco(self, hL: pandas.DataFrame):
        """
        Sensitivity of node pressures to pipe-roughness changes, weighted by
        hydraulic distance.

        :param hL: hydraulic-distance matrix from :meth:`CtoS`.
        :return: DataFrame SS with SS[i, j] = (pressure sensitivity of node i)
            * (hydraulic distance from node i to node j).
        """
        # Bug fix: copy the incidence matrix — the original aliased self.A2
        # and flipped its column signs in place, so repeated calls compounded
        # the sign changes.
        A = self.A2.copy()
        wn = self.wn
        # Bug fix: the original swallowed EpanetException with `pass` and
        # then hit a NameError on the undefined `result` in its finally
        # block; let simulation errors propagate with their real cause.
        result = wntr.sim.EpanetSimulator(wn).run_sim()
        h = result.link['headloss'][self.pipes].values[0]
        q = result.link['flowrate'][self.pipes].values[0]
        C = self.wn.query_link_attribute('roughness')[self.pipes]
        # Head loss per retained pipe.
        headloss = np.array(h)
        # Orient incidence columns along the flow direction.
        for i in range(0, len(q)):
            if q[i] < 0:
                A[:, i] = -A[:, i]
        q = np.abs(q)
        # Diagonal Jacobian factors (Hazen-Williams exponent 1.852); the
        # small epsilon guards against zero head loss.
        B = np.mat(np.diag(q / ((1.852 * headloss) + 1e-10)))
        S = np.mat(np.diag(q / C))
        # System matrix of the linearized network equations.
        X = A * B * A.T
        try:
            det = np.linalg.det(X)
        except (RuntimeError, np.linalg.LinAlgError):
            sign, logdet = slogdet(X)  # avoid overflow on large matrices
            det = sign * np.exp(logdet)
        if det != 0:
            # Head sensitivity to roughness.
            J_H_Cw = X.I * A * S
        else:  # X singular: fall back to the pseudo-inverse
            J_H_Cw = np.linalg.pinv(X) @ A @ S
        # Per-node pressure sensitivity: row sums of |J_H_Cw|.
        Sen_pressure = []
        S_pressure = np.abs(J_H_Cw).sum(axis=1).tolist()
        for ss in S_pressure:
            Sen_pressure.append(ss[0])
        # Total sensitivity: scale each hydraulic-distance row by the node's
        # sensitivity.  (The original computed this twice into SS_pressure
        # and SS and discarded the first — done once here.)
        SS = copy.deepcopy(hL)
        for i in range(0, len(Sen_pressure)):
            SS.iloc[i, :] = SS.iloc[i, :] * Sen_pressure[i]
        # SS[i, j]: sensitivity of nodes[i] * hydraulic distance to nodes[j].
        return SS
# 2025/03/12
# Step4: 传感器布置优化
# Sensorplacement
# weight分配权重
# sensor传感器布置的位置
class Sensorplacement(wn_func):
    """
    Sensor placement on top of wn_func: picks pressure-monitoring nodes from
    the weighted sensitivity matrix, both globally and per spatial group.
    """

    def __init__(self, wn: wntr.network.WaterNetworkModel, sensornum: int, min_diameter: int):
        """
        :param wn: water network model built by wntr
        :param sensornum: number of sensors to place
        :param min_diameter: minimum pipe diameter eligible for installation
        """
        wn_func.__init__(self, wn, min_diameter=min_diameter)
        self.sensornum = sensornum

    # 1. sum of weighted distances from a node to all nodes
    # 2. sum of weighted distances from a node to the nodes of its own group
    def sensor(self, SS: pandas.DataFrame, G: networkx.Graph, group: dict[int, list[str]]):
        """
        Choose sensor locations from the sensitivity matrix.

        :param SS: weighted sensitivity matrix; SS.iloc[i, :] holds node i's
                   weighted sensitivity towards every other node
        :param G: weighted network graph (unused here; kept for interface
                  compatibility)
        :param group: node grouping; key = group id, value = node-name list
                      (deleted nodes are stripped from it in place)
        :return: (sensorindex, sensorindex_2) — nodes chosen by global ranking
                 and by per-group ranking respectively
        """
        n = self.n - len(self.delnodes)
        nodes = copy.deepcopy(self.nodes)
        for node in self.delnodes:
            nodes.remove(node)
        # Total weighted sensitivity of each remaining node.
        sumSS = []
        for i in range(0, n):
            sumSS.append(sum(SS.iloc[i, :]))
        # Rank nodes by total sensitivity, descending.
        sumSS = pd.DataFrame(np.array(sumSS), index=nodes)
        sumSS = sumSS.sort_values(by=[0], ascending=[False])
        sensorindex = []    # chosen by global ranking
        sensorindex_2 = []  # chosen per group
        group_S = {}        # per-group sensitivity sub-matrix
        group_sumSS = {}    # per-group ranked total sensitivities
        for i in range(0, len(group)):
            # Strip deleted nodes (reservoirs, tanks, pump/valve ends).
            for node in self.delnodes:
                if node in group[i]:
                    group[i].remove(node)
            group_S[i] = SS.loc[group[i], group[i]]
            group_sumSS[i] = []
            for j in range(0, len(group[i])):
                group_sumSS[i].append(sum(group_S[i].iloc[j, :]))
            group_sumSS[i] = pd.DataFrame(np.array(group_sumSS[i]), index=group[i])
            group_sumSS[i] = group_sumSS[i].sort_values(by=[0], ascending=[False])
            # Exclude nodes sitting on pipes below the minimum diameter.
            # (The original tested `node in group_sumSS[i]`, which checks the
            # DataFrame *columns* and so never fired, and would then have
            # called the nonexistent DataFrame.remove; test the index and use
            # drop instead.)
            for node in self.less_than_min_diameter_junction_list:
                if node in group_sumSS[i].index:
                    group_sumSS[i] = group_sumSS[i].drop(index=node)
        # 1. Pick the globally most sensitive node, then drop its whole group so
        #    no second sensor lands in the same group; re-rank and repeat.
        # 2. Independently pick the most sensitive node inside each group.
        sensornum = self.sensornum
        for i in range(0, sensornum):
            # Smaxnode: node with the highest remaining total sensitivity.
            Smaxnode = sumSS.index[0]
            sensorindex.append(Smaxnode)
            sensorindex_2.append(group_sumSS[i].index[0])
            for key, value in group.items():
                if Smaxnode in value:
                    sumSS = sumSS.drop(index=group[key])
                    break  # each node belongs to exactly one group (was `continue`)
            sumSS = sumSS.sort_values(by=[0], ascending=[False])
        return sensorindex, sensorindex_2
# 2025/03/13
def get_ID(name: str, sensor_num: int, min_diameter: int) -> list[str]:
    """
    Compute initial pressure-monitoring node IDs via sensitivity-based placement.

    :param name: database name (resolves to ./db_inp/<name>.db.inp)
    :param sensor_num: number of monitoring points to place
    :param min_diameter: minimum pipe diameter eligible for installation
    :return: list of node IDs selected as monitoring points
    """
    inp_file_real = f'./db_inp/{name}.db.inp'
    # Network with the real roughness values.
    wn_real = wntr.network.WaterNetworkModel(inp_file_real)
    # wn_func runs its own EPANET simulation in __init__; the extra
    # EpanetSimulator run the original code performed here was never used.
    wn_fun1 = wn_func(wn_real, min_diameter=min_diameter)
    # Node coordinates without the excluded nodes (reservoirs, tanks,
    # pump/valve endpoints).
    Coor_node = getCoor(wn_real)
    Coor_node = Coor_node.drop(wn_fun1.delnodes)
    wn_fun1.get_Conn()
    # hL: hydraulic-distance matrix; G: weighted digraph of the topology.
    hL, G = wn_fun1.CtoS()
    # SS: weighted sensitivity matrix (pressure response to roughness changes).
    SS = wn_fun1.Jaco(hL)
    # Spatially cluster the remaining nodes into sensor_num groups.
    # (skater_partition / spectral_partition were commented-out alternatives.)
    group = kgroup(Coor_node, sensor_num)
    wn_fun = Sensorplacement(wn_real, sensor_num, min_diameter=min_diameter)
    # Reuse the already-computed wn_func state instead of recomputing it.
    wn_fun.__dict__.update(wn_fun1.__dict__)
    # sensorindex: nodes chosen by global ranking;
    # sensorindex_2: nodes chosen per group (unused by this function's caller).
    sensorindex, sensorindex_2 = wn_fun.sensor(SS, G, group)
    return sensorindex
if __name__ == '__main__':
    # Sensitivity-based placement of 20 monitoring points, restricted to
    # pipes of at least 300 mm diameter.
    selected_nodes = get_ID(name=project_info.name, sensor_num=20, min_diameter=300)
    print(selected_nodes)

View File

@@ -0,0 +1,557 @@
# 改进灵敏度法
import networkx
import numpy as np
import pandas
import wntr
import pandas as pd
import copy
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.cluster import KMeans
from wntr.epanet.toolkit import EpanetException
from numpy.linalg import slogdet
import random
from tjnetwork import *
import project_info
# 2025/03/12
# Step1: 获取节点坐标
def getCoor(wn: "wntr.network.WaterNetworkModel") -> pandas.DataFrame:
    """
    Collect the (x, y) coordinates of every node in the network model.

    :param wn: water network model built by wntr
    :return: DataFrame indexed by node name with columns 'x' and 'y'
    """
    # Series: index = node name, value = coordinate tuple such as (x, y).
    site = wn.query_node_attribute('coordinates')
    # Split the coordinate tuples into parallel x / y columns.
    xs = [point[0] for point in site.values]
    ys = [point[1] for point in site.values]
    return pd.DataFrame({'x': xs, 'y': ys}, index=wn.node_name_list, columns=['x', 'y'])
# 2025/03/12
# Step2: KMeans 聚类
# 将节点用kmeans根据坐标分为k组存入字典g
def kgroup(coor: pandas.DataFrame, knum: int) -> dict[int, list[str]]:
    """
    Partition nodes into knum spatial clusters with KMeans on their coordinates.

    :param coor: node coordinates, one row per node (index = node names)
    :param knum: number of clusters to form
    :return: dict mapping cluster id -> list of node names in that cluster
    """
    model = KMeans(n_clusters=knum)
    model.fit(coor)
    # One integer label per row of `coor`.
    labels = model.labels_
    return {k: coor.index[labels == k].tolist() for k in range(knum)}
# 2025/03/12
# Step3: wn_func class, hydraulic computations
# wn_func mainly computes:
# - hydraulic distance (hydraulic length), i.e. the hydraulic resistance between nodes;
# - sensitivity analysis, used to optimise the placement of pressure monitoring points.
# Hydraulics-related helpers include CtoS (hydraulic distance); the stafun (state
# function F), diff (derivative of F w.r.t. P, the sensitivity matrix A) and
# sensitivity (per-node and total sensitivity) helpers are referenced here but not defined in this file.
class wn_func(object):
    """
    Hydraulic helper around a wntr WaterNetworkModel.

    Computes the hydraulic distance between nodes (``CtoS``), node/pipe
    connectivity matrices (``get_Conn``) and the sensitivity matrix of node
    pressures to pipe roughness (``Jaco``); downstream code uses these to
    optimise pressure-sensor placement.
    """
    # Step3.1: initialisation
    def __init__(self, wn: wntr.network.WaterNetworkModel):
        """
        Collect network information from the model.
        :param wn: water network model built by wntr
        """
        # Simulation results: pressures, flows, heads, ...
        self.results = wntr.sim.EpanetSimulator(wn).run_sim() # store the run results
        self.wn = wn
        # self.q: pandas.DataFrame, pipe flow rates; index = time step, columns = link names.
        self.q = self.results.link['flowrate']
        # ReservoirIndex / Tankindex: list[str], reservoir / tank node names.
        ReservoirIndex = wn.reservoir_name_list
        Tankindex = wn.tank_name_list
        # Remove reservoir nodes and the virtual pipes attached directly to them.
        # self.pipes: list[str], names of all pipes.
        self.pipes = wn.pipe_name_list
        # self.nodes: list[str], names of all nodes.
        self.nodes = wn.node_name_list
        # self.coordinates: pandas.Series, node name -> (x, y) coordinate tuple.
        self.coordinates = wn.query_node_attribute('coordinates')
        # allpumps / allvalves: list[str], all pump / valve names.
        allpumps = wn.pump_name_list
        allvalves = wn.valve_name_list
        # Start / end node names of every pump and every valve.
        pumpstnode = []
        pumpednode = []
        valvestnode = []
        valveednode = []
        # Pipes and nodes directly attached to reservoirs or tanks.
        Reservoirpipe = []
        Reservoirednode = []
        for pump in allpumps:
            pumpstnode.append(wn.links[pump].start_node.name)
            pumpednode.append(wn.links[pump].end_node.name)
        for valve in allvalves:
            valvestnode.append(wn.links[valve].start_node.name)
            valveednode.append(wn.links[valve].end_node.name)
        for pipe in self.pipes:
            if wn.links[pipe].start_node.name in ReservoirIndex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            # NOTE(review): pipes whose *end* node is a reservoir are not
            # collected (only tank end nodes are handled below) — confirm
            # whether that asymmetry is intended.
            if wn.links[pipe].start_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].end_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].start_node.name)
        # Pump endpoints, tanks, reservoirs and their neighbours.
        # self.delnodes: list[str], nodes excluded from the analysis.
        self.delnodes = list(
            set(ReservoirIndex).union(Tankindex, pumpstnode, pumpednode, valvestnode, valveednode, Reservoirednode))
        # Pumps, valves, and pipes whose endpoints are tanks/reservoirs.
        # self.delpipes: list[str], links excluded from the analysis.
        self.delpipes = list(set(wn.pump_name_list).union(wn.valve_name_list).union(Reservoirpipe))
        self.pipes = [pipe for pipe in wn.pipe_name_list if pipe not in self.delpipes]
        # self.L: list[float], lengths (m) of the retained pipes.
        self.L = wn.query_link_attribute('length')[self.pipes].tolist()
        self.n = len(self.nodes)
        self.m = len(self.pipes)
        # self.unit_headloss: list[float], first time-step headloss of every link.
        self.unit_headloss = self.results.link['headloss'].iloc[0, :].tolist()
        # Reservoir and tank nodes only (without pump/valve neighbours).
        self.delnodes1 = list(set(ReservoirIndex).union(Tankindex))
    # Step3.2: hydraulic-distance computation
    def CtoS(self):
        """
        Build the hydraulic-distance matrix.

        Hydraulic distance: taking the row node as control point, entry
        (row, col) is the minimum over paths of (headloss * length) weights
        from the row node to the column node.
        :return: (hydraulicL, G) — distance DataFrame with deleted nodes
                 removed, and the weighted digraph it was computed on.
        """
        # nodes: list[str] (node names)
        nodes = copy.deepcopy(self.nodes)
        # pipes: list[str] (pipe names)
        pipes = self.pipes
        wn = self.wn
        # n / m: int (node count / pipe count)
        n = self.n
        m = self.m
        # NOTE(review): s1 is initialised but never used in this method.
        s1 = [0] * m
        q = self.q
        L = self.L
        # H1: pandas.DataFrame, heads transposed (rows = nodes, cols = time steps).
        H1 = self.results.node['head'].T
        # hh: list, absolute head difference across each pipe.
        hh = []
        # head losses
        for p in pipes:
            h1 = self.wn.links[p].start_node.name
            h1 = H1.loc[str(h1)]
            h2 = self.wn.links[p].end_node.name
            h2 = H1.loc[str(h2)]
            hh.append(abs(h1 - h2))
        hh = np.array(hh)
        # headloss: pandas.DataFrame, per-pipe headloss matrix.
        headloss = pd.DataFrame(hh, index=pipes).T
        # hf / weightL: node-by-node headloss and length-weighted headloss matrices.
        hf = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        weightL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        # Directed graph following the flow direction; edge weight = headloss * pipe length.
        G = nx.DiGraph()
        for i in range(0, m):
            pipe = pipes[i]
            a = wn.links[pipe].start_node.name
            b = wn.links[pipe].end_node.name
            if q.loc[0, pipe] > 0:
                hf.loc[a, b] = headloss.loc[0, pipe]
                weightL.loc[a, b] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(a, b, weightL.loc[a, b])])
            else:
                hf.loc[b, a] = headloss.loc[0, pipe]
                weightL.loc[b, a] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(b, a, weightL.loc[b, a])])
        # Shortest weighted path length = hydraulic distance between node pairs.
        hydraulicL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        for a in nodes:
            if a in G.nodes:
                d = nx.shortest_path_length(G, source=a, weight='weight')
                for b in list(d.keys()):
                    hydraulicL.loc[a, b] = d[b]
        hydraulicL = hydraulicL.drop(self.delnodes)
        hydraulicL = hydraulicL.drop(self.delnodes, axis=1)
        # weighted hydraulic distance
        return hydraulicL, G
    # Step3.3: sensitivity matrix
    # build the connectivity matrices
    def get_Conn(self):
        """
        Build the node/pipe and node/node connectivity matrices.

        Sets self.A (node-pipe incidence), self.A2 (incidence restricted to
        retained nodes/pipes, as numpy.matrix), self.A3 (node adjacency) and
        self.junc_list (index of retained nodes).
        :return: None
        """
        m = self.wn.num_links
        n = self.wn.num_nodes
        p = self.wn.num_pumps
        v = self.wn.num_valves
        self.nonjunc_index = []
        self.non_link_index = []
        for r in self.wn.reservoirs():
            self.nonjunc_index.append(r[0])
        for t in self.wn.tanks():
            self.nonjunc_index.append(t[0])
        # Conn: numpy.matrix, node-pipe incidence (-1 at start node, 1 at end node).
        # NOTE(review): columns are sized m - p - v, but p_index below comes
        # from the *full* link list — this only works if pipes precede pumps
        # and valves in link_name_list; confirm.
        Conn = np.mat(np.zeros([n, m - p - v]))  # rows = nodes, cols = pipes; start -1, end +1
        # NConn: numpy.matrix, node adjacency (1 where a pipe connects two nodes).
        NConn = np.mat(np.zeros([n, n]))  # 1 if a pipe connects the node pair, else 0
        # pipes: (name, link) pairs excluding pumps and valves.
        pipes = [pipe for pipe in self.wn.pipes() if pipe not in self.wn.pumps() and pipe not in self.wn.valves()]
        for pipe_name, pipe in pipes:
            start = self.wn.node_name_list.index(pipe.start_node_name)
            end = self.wn.node_name_list.index(pipe.end_node_name)
            p_index = self.wn.link_name_list.index(pipe_name)
            Conn[start, p_index] = -1
            Conn[end, p_index] = 1
            NConn[start, end] = 1
            NConn[end, start] = 1
        self.A = Conn
        link_name_list = [link for link in self.wn.link_name_list if
                          link not in self.wn.pump_name_list and link not in self.wn.valve_name_list]
        self.A2 = pd.DataFrame(self.A, index=self.wn.node_name_list, columns=link_name_list)
        self.A2 = self.A2.drop(self.delnodes)
        for pipe in self.delpipes:
            if pipe not in self.wn.pump_name_list and pipe not in self.wn.valve_name_list:
                self.A2 = self.A2.drop(columns=pipe)
        self.junc_list = self.A2.index
        self.A2 = np.mat(self.A2) # node-pipe incidence of the retained network
        self.A3 = NConn
    def Jaco(self, hL: pandas.DataFrame):
        """
        Compute the weighted sensitivity matrix (node pressure response to
        pipe roughness changes, weighted by hydraulic distance).
        :param hL: hydraulic-distance matrix
        :return: SS where SS.iloc[i, j] = total sensitivity of node i times
                 the hydraulic distance from node i to node j
        """
        # global result
        # A: numpy.matrix, node-pipe incidence matrix
        A = self.A2
        wn = self.wn
        try:
            result = wntr.sim.EpanetSimulator(wn).run_sim()
        except EpanetException:
            # NOTE(review): if the simulation raises, `result` stays unbound
            # and the `finally` block below fails with NameError — confirm
            # whether the exception should propagate instead.
            pass
        finally:
            h = result.link['headloss'][self.pipes].values[0]
            q = result.link['flowrate'][self.pipes].values[0]
            # NOTE(review): `l` is computed but never used.
            l = self.wn.query_link_attribute('length')[self.pipes]
            C = self.wn.query_link_attribute('roughness')[self.pipes]
            # headloss: numpy.ndarray, per-pipe head losses
            headloss = np.array(h)
            # Orient incidence columns along the actual flow direction.
            # NOTE(review): A references self.A2, so these sign flips mutate
            # self.A2 in place and accumulate across calls — confirm intent.
            for i in range(0, len(q)):
                if q[i] < 0:
                    A[:, i] = -A[:, i]
            # q: numpy.ndarray, absolute flow rates
            q = np.abs(q)
            # Two intermediate diagonal matrices of the sensitivity derivation
            # (the 1e-10 term avoids division by zero on zero-headloss pipes).
            B = np.mat(np.diag(q / ((1.852 * headloss) + 1e-10)))
            S = np.mat(np.diag(q / C))
            # X: numpy.matrix, system matrix to invert
            X = A * B * A.T
            try:
                det = np.linalg.det(X)
            except RuntimeError as e:
                sign, logdet = slogdet(X)  # guard against overflow in det()
                det = sign * np.exp(logdet)
            if det != 0:
                J_H_Cw = X.I * A * S
                # J_H_Q = -X.I
                J_q_Cw = S - B * A.T * X.I * A * S  # delnodes and delpipes removed
                # J_q_Q = B * A.T * X.I
            else:  # X is singular: fall back to the pseudo-inverse
                J_H_Cw = np.linalg.pinv(X) @ A @ S
                # J_H_Q = -np.linalg.pinv(X)
                J_q_Cw = S - B * A.T * np.linalg.pinv(X) * A * S
                # J_q_Q = B * A.T * np.linalg.pinv(X)
            Sen_pressure = []
            S_pressure = np.abs(J_H_Cw).sum(axis=1).tolist()  # use absolute values
            for ss in S_pressure:
                Sen_pressure.append(ss[0])
            # total sensitivity weighted by hydraulic distance
            # NOTE(review): SS_pressure duplicates the SS computation below
            # and is never used.
            SS_pressure = copy.deepcopy(hL)
            for i in range(0, len(Sen_pressure)):
                SS_pressure.iloc[i, :] = SS_pressure.iloc[i, :] * Sen_pressure[i]
            SS = copy.deepcopy(hL)
            for i in range(0, len(Sen_pressure)):
                SS.iloc[i, :] = SS.iloc[i, :] * Sen_pressure[i]
            # SS[i, j]: sensitivity of node nodes[i] * hydraulic distance from it to nodes[j]
            return SS
# 2025/03/12
# Step4: sensor placement optimisation
# Sensorplacement class:
# weight: assigns weights
# sensor: chooses the sensor locations
class Sensorplacement(wn_func):
    """
    Sensor placement on top of wn_func: picks pressure-monitoring nodes from
    the weighted sensitivity matrix, both globally and per spatial group.
    """

    def __init__(self, wn: wntr.network.WaterNetworkModel, sensornum: int):
        """
        :param wn: water network model built by wntr
        :param sensornum: number of sensors to place
        """
        wn_func.__init__(self, wn)
        self.sensornum = sensornum

    # 1. sum of weighted distances from a node to all nodes
    # 2. sum of weighted distances from a node to the nodes of its own group
    def sensor(self, SS: pandas.DataFrame, G: networkx.Graph, group: dict[int, list[str]]):
        """
        Choose sensor locations from the sensitivity matrix.

        :param SS: weighted sensitivity matrix; SS.iloc[i, :] holds node i's
                   weighted sensitivity towards every other node
        :param G: weighted network graph (unused here; kept for interface
                  compatibility)
        :param group: node grouping; key = group id, value = node-name list
                      (deleted nodes are stripped from it in place)
        :return: (sensorindex, sensorindex_2) — nodes chosen by global ranking
                 and by per-group ranking respectively
        """
        n = self.n - len(self.delnodes)
        nodes = copy.deepcopy(self.nodes)
        for node in self.delnodes:
            nodes.remove(node)
        # Total weighted sensitivity of each remaining node.
        sumSS = []
        for i in range(0, n):
            sumSS.append(sum(SS.iloc[i, :]))
        # Persist the raw totals for offline inspection (side effect retained
        # from the original implementation).
        sumSS_ = pd.DataFrame(np.array(sumSS), index=range(0, n))
        sumSS_.to_csv('sumSS_data.csv')
        # Rank nodes by total sensitivity, descending.
        sumSS = pd.DataFrame(np.array(sumSS), index=nodes)
        sumSS = sumSS.sort_values(by=[0], ascending=[False])
        sensorindex = []    # chosen by global ranking
        sensorindex_2 = []  # chosen per group
        group_S = {}        # per-group sensitivity sub-matrix
        group_sumSS = {}    # per-group ranked total sensitivities
        for i in range(0, len(group)):
            # Strip deleted nodes (reservoirs, tanks, pump/valve ends).
            for node in self.delnodes:
                if node in group[i]:
                    group[i].remove(node)
            group_S[i] = SS.loc[group[i], group[i]]
            group_sumSS[i] = []
            for j in range(0, len(group[i])):
                group_sumSS[i].append(sum(group_S[i].iloc[j, :]))
            group_sumSS[i] = pd.DataFrame(np.array(group_sumSS[i]), index=group[i])
            group_sumSS[i] = group_sumSS[i].sort_values(by=[0], ascending=[False])
        # 1. Pick the globally most sensitive node, then drop its whole group so
        #    no second sensor lands in the same group; re-rank and repeat.
        # 2. Independently pick the most sensitive node inside each group.
        sensornum = self.sensornum
        for i in range(0, sensornum):
            # Smaxnode: node with the highest remaining total sensitivity.
            Smaxnode = sumSS.index[0]
            sensorindex.append(Smaxnode)
            sensorindex_2.append(group_sumSS[i].index[0])
            for key, value in group.items():
                if Smaxnode in value:
                    sumSS = sumSS.drop(index=group[key])
                    break  # each node belongs to exactly one group (was `continue`)
            sumSS = sumSS.sort_values(by=[0], ascending=[False])
        return sensorindex, sensorindex_2
# 2025/03/13
def get_sensor_coord(name: str, sensor_num: int) -> dict[str, float]:
    """
    Place pressure-monitoring points by sensitivity and return their coordinates.

    :param name: database name (resolves to ./db_inp/<name>.db.inp)
    :param sensor_num: number of monitoring points to place
    :return: dict mapping selected node ID -> coordinate record fetched from
             the project database via get_node_coord
    """
    inp_file_real = f'./db_inp/{name}.db.inp'
    # Network with the real roughness values.
    wn_real = wntr.network.WaterNetworkModel(inp_file_real)
    # wn_func runs its own EPANET simulation in __init__; the extra
    # EpanetSimulator run the original code performed here was never used.
    wn_fun1 = wn_func(wn_real)
    # Node coordinates without the excluded nodes (reservoirs, tanks,
    # pump/valve endpoints).
    Coor_node = getCoor(wn_real)
    Coor_node = Coor_node.drop(wn_fun1.delnodes)
    wn_fun1.get_Conn()
    # hL: hydraulic-distance matrix; G: weighted digraph of the topology.
    hL, G = wn_fun1.CtoS()
    # SS: weighted sensitivity matrix (pressure response to roughness changes).
    SS = wn_fun1.Jaco(hL)
    # Spatially cluster the remaining nodes into sensor_num groups.
    group = kgroup(Coor_node, sensor_num)
    wn_fun = Sensorplacement(wn_real, sensor_num)
    # Reuse the already-computed wn_func state instead of recomputing it.
    wn_fun.__dict__.update(wn_fun1.__dict__)
    # sensorindex: nodes chosen by global ranking;
    # sensorindex_2: nodes chosen per group (not returned here).
    sensorindex, sensorindex_2 = wn_fun.sensor(SS, G, group)
    sensor_coord = {}
    # Reopen the project database and look up each chosen node's coordinates.
    if is_project_open(name=name):
        close_project(name=name)
    open_project(name=name)
    for node_id in sensorindex:
        sensor_coord[node_id] = get_node_coord(name=name, node_id=node_id)
    close_project(name=name)
    return sensor_coord
if __name__ == '__main__':
    # Sensitivity-based placement of 20 monitoring points; print the
    # coordinates looked up from the project database.
    coords = get_sensor_coord(name=project_info.name, sensor_num=20)
    print(coords)