Refactor and modernize the FastAPI backend project framework

2026-01-21 16:50:57 +08:00
parent 9e06e68a15
commit c56f2fd1db
352 changed files with 176 additions and 70 deletions


@@ -0,0 +1,289 @@
# ...existing code...
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pykalman import KalmanFilter
import os
def clean_flow_data_kf(input_csv_path: str, show_plot: bool = False) -> str:
    """
    Read every time-series column in input_csv_path, smooth it with a one-dimensional Kalman
    filter, and replace outliers detected by the IQR rule on the residuals with the Kalman
    estimates (repaired values are written to new '<column>_cleaned' columns).
    The output is saved as <input_filename>_cleaned.xlsx in the same directory as the input,
    and the absolute path of the output file is returned.
    Only the input file path is required as a parameter (as requested).
    """
    # Read the CSV
    data = pd.read_csv(input_csv_path, header=0, index_col=None, encoding="utf-8")
    # Container for the Kalman-smoothed results
    data_kf = pd.DataFrame(index=data.index, columns=data.columns)
    # Smooth each column
for col in data.columns:
observations = pd.Series(data[col].values).ffill().bfill()
if observations.isna().any():
observations = observations.fillna(observations.mean())
obs = observations.values.astype(float)
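        # The parameters below define a simple random-walk state-space model:
        #     x_t = x_{t-1} + w_t,  w_t ~ N(0, transition_covariance)
        #     z_t = x_t + v_t,      v_t ~ N(0, observation_covariance)
        # With transition_covariance = 0.01 and observation_covariance = 1, the smoother
        # trusts the model far more than the measurements, giving a heavily smoothed track.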
kf = KalmanFilter(
transition_matrices=[1],
observation_matrices=[1],
initial_state_mean=float(obs[0]),
initial_state_covariance=1,
observation_covariance=1,
transition_covariance=0.01,
)
        # Skip EM parameter learning and use the fixed parameters above for performance
state_means, _ = kf.smooth(obs)
data_kf[col] = state_means.flatten()
    # Compute residuals and detect anomalies with the IQR rule (more robust than 3σ)
residuals = data - data_kf
residual_thresholds = {}
for col in data.columns:
        res_values = residuals[col].dropna().values  # drop NaN before computing the IQR
q1 = np.percentile(res_values, 25)
q3 = np.percentile(res_values, 75)
iqr = q3 - q1
lower_threshold = q1 - 1.5 * iqr
upper_threshold = q3 + 1.5 * iqr
residual_thresholds[col] = (lower_threshold, upper_threshold)
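    # Illustration with assumed numbers: if q1 = -2.0 and q3 = 2.0 then iqr = 4.0,
    # so residuals outside [-8.0, 8.0] (q1 - 1.5*iqr, q3 + 1.5*iqr) are flagged below.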
cleaned_data = data.copy()
anomalies_info = {}
for col in data.columns:
lower, upper = residual_thresholds[col]
sensor_residuals = residuals[col]
anomaly_mask = (sensor_residuals < lower) | (sensor_residuals > upper)
anomaly_idx = data.index[anomaly_mask.fillna(False)]
anomalies_info[col] = pd.DataFrame(
{
"Observed": data.loc[anomaly_idx, col],
"Kalman_Predicted": data_kf.loc[anomaly_idx, col],
"Residual": sensor_residuals.loc[anomaly_idx],
}
)
cleaned_data.loc[anomaly_idx, f"{col}_cleaned"] = data_kf.loc[anomaly_idx, col]
    # Build the output filename: input basename plus the suffix _cleaned.xlsx
input_dir = os.path.dirname(os.path.abspath(input_csv_path))
input_base = os.path.splitext(os.path.basename(input_csv_path))[0]
output_filename = f"{input_base}_cleaned.xlsx"
output_path = os.path.join(input_dir, output_filename)
    # Overwrite an existing file with the same name
if os.path.exists(output_path):
os.remove(output_path)
cleaned_data.to_excel(output_path, index=False)
    # Optional visualisation (first sensor only)
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
if show_plot and len(data.columns) > 0:
sensor_to_plot = data.columns[0]
plt.figure(figsize=(12, 6))
plt.plot(
data.index,
data[sensor_to_plot],
label="监测值",
marker="o",
markersize=3,
alpha=0.7,
)
plt.plot(
data.index, data_kf[sensor_to_plot], label="Kalman滤波预测值", linewidth=2
)
anomaly_idx = anomalies_info[sensor_to_plot].index
if len(anomaly_idx) > 0:
plt.plot(
anomaly_idx,
data[sensor_to_plot].loc[anomaly_idx],
"ro",
markersize=8,
label="监测值异常点",
)
plt.plot(
anomaly_idx,
data_kf[sensor_to_plot].loc[anomaly_idx],
"go",
markersize=8,
label="Kalman修复值",
)
plt.xlabel("时间点(序号)")
plt.ylabel("监测值")
plt.title(f"{sensor_to_plot}观测值与Kalman滤波预测值异常点标记")
plt.legend()
plt.show()
    # Return the absolute path of the output file
return os.path.abspath(output_path)
def clean_flow_data_df_kf(data: pd.DataFrame, show_plot: bool = False) -> pd.DataFrame:
    """
    Take a DataFrame, smooth each column with a one-dimensional Kalman filter, and replace
    outliers detected by the IQR rule on the residuals with the Kalman estimates.
    Legitimate zero readings (e.g. flow switching) are distinguished from anomalous zeros
    (runs of consecutive zeros or isolated zeros).
    Returns the fully cleaned data as a DataFrame of the same shape.
    """
    # Work on a copy of the input DataFrame
    data = data.copy()
    # Replace zeros with NaN
    data_filled = data.replace(0, np.nan)
    # Interpolate the anomalous zeros linearly, then handle any remaining NaN with ffill/bfill
    data_filled = data_filled.interpolate(method="linear", limit_direction="both")
    # Fill any remaining NaN values
data_filled = data_filled.ffill().bfill()
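    # Illustration with assumed values: [5.0, 0.0, 0.0, 6.0] becomes [5.0, NaN, NaN, 6.0]
    # after the zero replacement, and linear interpolation then yields roughly
    # [5.0, 5.33, 5.67, 6.0]; zeros at the very start or end that cannot be interpolated
    # are handled by the ffill()/bfill() step above.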
    # Container for the Kalman-smoothed results
    data_kf = pd.DataFrame(index=data_filled.index, columns=data_filled.columns)
    # Smooth each column
for col in data_filled.columns:
observations = pd.Series(data_filled[col].values).ffill().bfill()
if observations.isna().any():
observations = observations.fillna(observations.mean())
obs = observations.values.astype(float)
kf = KalmanFilter(
transition_matrices=[1],
observation_matrices=[1],
initial_state_mean=float(obs[0]),
initial_state_covariance=1,
observation_covariance=10,
transition_covariance=10,
)
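        # Note: both noise covariances are 10 here, so the ratio of process noise to
        # measurement noise is 1; the smoothed state therefore follows the observations
        # more closely than in clean_flow_data_kf above, where the ratio is 0.01.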
state_means, _ = kf.smooth(obs)
data_kf[col] = state_means.flatten()
    # Compute residuals and detect anomalies with the IQR rule
residuals = data_filled - data_kf
residual_thresholds = {}
for col in data_filled.columns:
res_values = residuals[col].dropna().values
q1 = np.percentile(res_values, 25)
q3 = np.percentile(res_values, 75)
iqr = q3 - q1
lower_threshold = q1 - 1.5 * iqr
upper_threshold = q3 + 1.5 * iqr
residual_thresholds[col] = (lower_threshold, upper_threshold)
    # Build the fully repaired dataset
cleaned_data = data_filled.copy()
anomalies_info = {}
for col in data_filled.columns:
lower, upper = residual_thresholds[col]
sensor_residuals = residuals[col]
anomaly_mask = (sensor_residuals < lower) | (sensor_residuals > upper)
anomaly_idx = data_filled.index[anomaly_mask.fillna(False)]
anomalies_info[col] = pd.DataFrame(
{
"Observed": data_filled.loc[anomaly_idx, col],
"Kalman_Predicted": data_kf.loc[anomaly_idx, col],
"Residual": sensor_residuals.loc[anomaly_idx],
}
)
        # Replace anomalous values in the original column with the Kalman predictions
cleaned_data.loc[anomaly_idx, col] = data_kf.loc[anomaly_idx, col]
    # Optional visualisation
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
if show_plot and len(data.columns) > 0:
sensor_to_plot = data.columns[0]
plt.figure(figsize=(12, 8))
plt.subplot(2, 1, 1)
plt.plot(
data.index,
data[sensor_to_plot],
label="原始监测值",
marker="o",
markersize=3,
alpha=0.7,
)
abnormal_zero_idx = data.index[data_filled[sensor_to_plot].isna()]
if len(abnormal_zero_idx) > 0:
plt.plot(
abnormal_zero_idx,
data[sensor_to_plot].loc[abnormal_zero_idx],
"mo",
markersize=8,
label="异常0值",
)
plt.plot(
data.index, data_kf[sensor_to_plot], label="Kalman滤波预测值", linewidth=2
)
anomaly_idx = anomalies_info[sensor_to_plot].index
if len(anomaly_idx) > 0:
plt.plot(
anomaly_idx,
data_filled[sensor_to_plot].loc[anomaly_idx],
"ro",
markersize=8,
label="IQR异常点",
)
plt.xlabel("时间点(序号)")
plt.ylabel("流量值")
plt.title(f"{sensor_to_plot}:原始数据与异常检测")
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(
data.index,
cleaned_data[sensor_to_plot],
label="修复后监测值",
marker="o",
markersize=3,
color="green",
)
plt.xlabel("时间点(序号)")
plt.ylabel("流量值")
plt.title(f"{sensor_to_plot}:修复后数据")
plt.legend()
plt.tight_layout()
plt.show()
    # Return the fully repaired DataFrame
return cleaned_data
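# A minimal usage sketch, assuming a single hypothetical column named "flow_1":
#     df = pd.DataFrame({"flow_1": [12.1, 0.0, 11.8, 55.0, 12.3]})
#     cleaned = clean_flow_data_df_kf(df)  # returns a DataFrame of the same shape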
# # Test
# if __name__ == "__main__":
# # Default: a CSV file with the same name in the script directory
# script_dir = os.path.dirname(os.path.abspath(__file__))
# default_csv = os.path.join(script_dir, "pipe_flow_data_to_clean2.0.csv")
# out = clean_flow_data_kf(default_csv)
# print("Cleaned data saved to:", out)
# Test the clean_flow_data_df_kf function
if __name__ == "__main__":
import random
    # Read the szh_flow_scada.csv file
script_dir = os.path.dirname(os.path.abspath(__file__))
csv_path = os.path.join(script_dir, "szh_flow_scada.csv")
data = pd.read_csv(csv_path, header=0, index_col=None, encoding="utf-8")
    # Exclude the Time column and randomly select 1 column
columns_to_exclude = ["Time"]
available_columns = [col for col in data.columns if col not in columns_to_exclude]
selected_columns = random.sample(available_columns, 1)
    # Convert the selected column(s) into a dict
    data_dict = {col: data[col].tolist() for col in selected_columns}
    print("Selected columns:", selected_columns)
    print("Original data length:", len(data_dict[selected_columns[0]]))
    # Run the cleaning function (it expects a DataFrame, so wrap the dict first)
    cleaned_dict = clean_flow_data_df_kf(pd.DataFrame(data_dict), show_plot=True)
    # Write the cleaned result back to CSV
out_csv = os.path.join(script_dir, f"{selected_columns[0]}_clean.csv")
pd.DataFrame(cleaned_dict).to_csv(out_csv, index=False, encoding="utf-8-sig")
    print("Cleaned result saved to:", out_csv)
    print("Cleaned data keys:", list(cleaned_dict.keys()))
    print("Cleaned data length:", len(cleaned_dict[selected_columns[0]]))
    print("Test complete: the function ran successfully")


@@ -0,0 +1,238 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.impute import SimpleImputer
import os
def clean_pressure_data_km(input_csv_path: str, show_plot: bool = False) -> str:
    """
    Read the input CSV, detect anomalies with KMeans clustering, and repair them with a
    rolling mean. The output is <input_basename>_cleaned.xlsx in the same directory:
    the raw data goes to sheet 'raw_pressure_data' and the processed data to sheet
    'cleaned_pressure_data'.
    Returns the absolute path of the output file.
    """
    # Read the CSV
    input_csv_path = os.path.abspath(input_csv_path)
    data = pd.read_csv(input_csv_path, header=0, index_col=None, encoding="utf-8")
    # Standardise (z-score per column)
    data_norm = (data - data.mean()) / data.std()
    # Clustering and anomaly detection
k = 3
kmeans = KMeans(n_clusters=k, init="k-means++", n_init=50, random_state=42)
clusters = kmeans.fit_predict(data_norm)
centers = kmeans.cluster_centers_
distances = np.linalg.norm(data_norm.values - centers[clusters], axis=1)
threshold = distances.mean() + 3 * distances.std()
anomaly_pos = np.where(distances > threshold)[0]
anomaly_indices = data.index[anomaly_pos]
anomaly_details = {}
for pos in anomaly_pos:
row_norm = data_norm.iloc[pos]
cluster_idx = clusters[pos]
center = centers[cluster_idx]
diff = abs(row_norm - center)
main_sensor = diff.idxmax()
anomaly_details[data.index[pos]] = main_sensor
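    # For each flagged row, the column whose z-scored value deviates most from the cluster
    # centre is treated as the sensor responsible for the anomaly; only that column is
    # repaired below.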
    # Repair: rolling mean (window size is tunable)
data_rolled = data.rolling(window=13, center=True, min_periods=1).mean()
data_repaired = data.copy()
for pos in anomaly_pos:
label = data.index[pos]
sensor = anomaly_details[label]
data_repaired.loc[label, sensor] = data_rolled.loc[label, sensor]
    # Optional visualisation (positions used as the x-axis)
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
if show_plot and len(data.columns) > 0:
n = len(data)
time = np.arange(n)
plt.figure(figsize=(12, 8))
for col in data.columns:
plt.plot(time, data[col].values, marker="o", markersize=3, label=col)
for pos in anomaly_pos:
sensor = anomaly_details[data.index[pos]]
plt.plot(pos, data.iloc[pos][sensor], "ro", markersize=8)
plt.xlabel("时间点(序号)")
plt.ylabel("压力监测值")
plt.title("各传感器折线图(红色标记主要异常点)")
plt.legend()
plt.show()
plt.figure(figsize=(12, 8))
for col in data_repaired.columns:
plt.plot(
time, data_repaired[col].values, marker="o", markersize=3, label=col
)
for pos in anomaly_pos:
sensor = anomaly_details[data.index[pos]]
plt.plot(pos, data_repaired.iloc[pos][sensor], "go", markersize=8)
plt.xlabel("时间点(序号)")
plt.ylabel("修复后压力监测值")
plt.title("修复后各传感器折线图(绿色标记修复值)")
plt.legend()
plt.show()
    # Save to Excel (two sheets)
    input_dir = os.path.dirname(os.path.abspath(input_csv_path))
    input_base = os.path.splitext(os.path.basename(input_csv_path))[0]
    output_filename = f"{input_base}_cleaned.xlsx"
    output_path = os.path.join(input_dir, output_filename)
    if os.path.exists(output_path):
        os.remove(output_path)  # overwrite an existing file with the same name
    with pd.ExcelWriter(output_path, engine="openpyxl") as writer:
        data.to_excel(writer, sheet_name="raw_pressure_data", index=False)
        data_repaired.to_excel(writer, sheet_name="cleaned_pressure_data", index=False)
    # Return the absolute path of the output file
return os.path.abspath(output_path)
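# A minimal usage sketch (the CSV name mirrors the commented-out test at the bottom of
# this file; adjust the path as needed):
#     out_path = clean_pressure_data_km("pressure_raw_data.csv", show_plot=False)
#     print(out_path)  # .../pressure_raw_data_cleaned.xlsx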
def clean_pressure_data_df_km(data: pd.DataFrame, show_plot: bool = False) -> pd.DataFrame:
    """
    Take a DataFrame, detect anomalies with KMeans clustering, and repair them with a
    rolling mean.
    Returns the cleaned data as a DataFrame of the same shape.
    """
    # Work on a copy of the input DataFrame
    data = data.copy()
    # Fill NaN values
    data = data.ffill().bfill()
    # Outlier pre-processing:
    # replace zeros with NaN, then fill them by linear interpolation
    data_filled = data.replace(0, np.nan)
    data_filled = data_filled.interpolate(method="linear", limit_direction="both")
    # If NaN remains (e.g. columns that were all zeros), fill with neighbouring values
    data_filled = data_filled.ffill().bfill()
    # Standardise (using the filled data)
    data_norm = (data_filled - data_filled.mean()) / data_filled.std()
    # Handle NaN produced by standardisation (e.g. columns with zero standard deviation,
    # where every value in the anomalous period is identical and the z-score becomes NaN)
    imputer = SimpleImputer(
        strategy="constant", fill_value=0, keep_empty_features=True
    )  # fill NaN (including all-NaN columns) with 0 and keep empty features
data_norm = pd.DataFrame(
imputer.fit_transform(data_norm),
columns=data_norm.columns,
index=data_norm.index,
)
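    # Illustration with assumed values: a column such as [3.0, 3.0, 3.0] has zero standard
    # deviation, so (x - mean) / std is NaN for every row; the imputer above maps those NaN
    # back to 0 so that KMeans can still run.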
    # Clustering and anomaly detection
k = 3
kmeans = KMeans(n_clusters=k, init="k-means++", n_init=50, random_state=42)
clusters = kmeans.fit_predict(data_norm)
centers = kmeans.cluster_centers_
distances = np.linalg.norm(data_norm.values - centers[clusters], axis=1)
threshold = distances.mean() + 3 * distances.std()
anomaly_pos = np.where(distances > threshold)[0]
anomaly_indices = data.index[anomaly_pos]
anomaly_details = {}
for pos in anomaly_pos:
row_norm = data_norm.iloc[pos]
cluster_idx = clusters[pos]
center = centers[cluster_idx]
diff = abs(row_norm - center)
main_sensor = diff.idxmax()
anomaly_details[data.index[pos]] = main_sensor
    # Repair: rolling mean (window size is tunable)
data_rolled = data_filled.rolling(window=13, center=True, min_periods=1).mean()
data_repaired = data_filled.copy()
for pos in anomaly_pos:
label = data.index[pos]
sensor = anomaly_details[label]
data_repaired.loc[label, sensor] = data_rolled.loc[label, sensor]
    # Optional visualisation (positions used as the x-axis)
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
if show_plot and len(data.columns) > 0:
n = len(data)
time = np.arange(n)
plt.figure(figsize=(12, 8))
for col in data.columns:
plt.plot(
time, data[col].values, marker="o", markersize=3, label=col, alpha=0.5
)
for col in data_filled.columns:
plt.plot(
time,
data_filled[col].values,
marker="x",
markersize=3,
label=f"{col}_filled",
linestyle="--",
)
for pos in anomaly_pos:
sensor = anomaly_details[data.index[pos]]
plt.plot(pos, data_filled.iloc[pos][sensor], "ro", markersize=8)
plt.xlabel("时间点(序号)")
plt.ylabel("压力监测值")
plt.title("各传感器折线图红色标记主要异常点虚线为0值填充后")
plt.legend()
plt.show()
plt.figure(figsize=(12, 8))
for col in data_repaired.columns:
plt.plot(
time, data_repaired[col].values, marker="o", markersize=3, label=col
)
for pos in anomaly_pos:
sensor = anomaly_details[data.index[pos]]
plt.plot(pos, data_repaired.iloc[pos][sensor], "go", markersize=8)
plt.xlabel("时间点(序号)")
plt.ylabel("修复后压力监测值")
plt.title("修复后各传感器折线图(绿色标记修复值)")
plt.legend()
plt.show()
    # Return the cleaned DataFrame
return data_repaired
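# A minimal usage sketch, assuming two hypothetical sensor columns:
#     df = pd.DataFrame({"p1": [0.31, 0.30, 0.95, 0.29], "p2": [0.42, 0.41, 0.40, 0.43]})
#     repaired = clean_pressure_data_df_km(df)  # returns a DataFrame of the same shape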
# Test
# if __name__ == "__main__":
# # By default use pressure_raw_data.csv from the script directory
# script_dir = os.path.dirname(os.path.abspath(__file__))
# default_csv = os.path.join(script_dir, "pressure_raw_data.csv")
# out_path = clean_pressure_data_km(default_csv, show_plot=False)
# print("Saved to:", out_path)
# Test the clean_pressure_data_df_km function
if __name__ == "__main__":
import random
    # Read the szh_pressure_scada.csv file
script_dir = os.path.dirname(os.path.abspath(__file__))
csv_path = os.path.join(script_dir, "szh_pressure_scada.csv")
data = pd.read_csv(csv_path, header=0, index_col=None, encoding="utf-8")
    # Exclude the Time column and randomly select 5 columns
columns_to_exclude = ["Time"]
available_columns = [col for col in data.columns if col not in columns_to_exclude]
selected_columns = random.sample(available_columns, 5)
    # Convert the selected columns into a dict
    data_dict = {col: data[col].tolist() for col in selected_columns}
    print("Selected columns:", selected_columns)
    print("Original data length:", len(data_dict[selected_columns[0]]))
    # Run the cleaning function (it expects a DataFrame, so wrap the dict first)
    cleaned_dict = clean_pressure_data_df_km(pd.DataFrame(data_dict), show_plot=True)
    print("Cleaned data keys:", list(cleaned_dict.keys()))
    print("Cleaned data length:", len(cleaned_dict[selected_columns[0]]))
    print("Test complete: the function ran successfully")


@@ -0,0 +1,3 @@
from .Fdataclean import *
from .Pdataclean import *
from .pipeline_health_analyzer import *


@@ -0,0 +1,109 @@
import wntr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cluster
import os
class QD_KMeans(object):
def __init__(self, wn, num_monitors):
# self.inp = inp
        self.cluster_num = num_monitors  # number of cluster centres, i.e. number of pressure-monitoring points
        self.wn = wn
self.monitor_nodes = []
self.coords = []
self.junction_nodes = {} # Added missing initialization
def get_junctions_coordinates(self):
for junction_name in self.wn.junction_name_list:
junction = self.wn.get_node(junction_name)
self.junction_nodes[junction_name] = junction.coordinates
            self.coords.append(junction.coordinates)
# print(f"Total junctions: {self.junction_coordinates}")
def select_monitoring_points(self):
if not self.coords: # Add check if coordinates are collected
self.get_junctions_coordinates()
coords = np.array(self.coords)
coords_normalized = (coords - coords.min(axis=0)) / (coords.max(axis=0) - coords.min(axis=0))
        kmeans = sklearn.cluster.KMeans(n_clusters=self.cluster_num, random_state=42)
kmeans.fit(coords_normalized)
for center in kmeans.cluster_centers_:
distances = np.sum((coords_normalized - center) ** 2, axis=1)
nearest_node = self.wn.junction_name_list[np.argmin(distances)]
self.monitor_nodes.append(nearest_node)
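        # Each cluster centre is mapped to the junction whose normalised coordinates are
        # closest to it (squared Euclidean distance); note that two centres sharing the
        # same nearest junction would leave a duplicate in monitor_nodes.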
return self.monitor_nodes
def visualize_network(self):
"""Visualize network with monitoring points"""
ax=wntr.graphics.plot_network(self.wn,
node_attribute=self.monitor_nodes,
node_size=30,
title='Optimal sensor')
plt.show()
def kmeans_sensor_placement(name: str, sensor_num: int, min_diameter: int) -> list:
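    # Note: min_diameter is accepted for interface compatibility but is not used yet.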
    inp_name = f'./db_inp/{name}.db.inp'
    wn = wntr.network.WaterNetworkModel(inp_name)
    wn_cluster = QD_KMeans(wn, sensor_num)
    # Select monitoring points
    sensor_ids = wn_cluster.select_monitoring_points()
# wn_cluster.visualize_network()
return sensor_ids
if __name__ == "__main__":
#sensorindex = get_ID(name='suzhouhe_2024_cloud_0817', sensor_num=30, min_diameter=500)
sensorindex = kmeans_sensor_placement(name='szh', sensor_num=50, min_diameter=300)
print(sensorindex)


@@ -0,0 +1,142 @@
import os
import joblib
import pandas as pd
import matplotlib.pyplot as plt
class PipelineHealthAnalyzer:
"""
    Pipeline health analyser that uses a random survival forest model to predict pipe
    survival probabilities.
    The class wraps model loading and prediction so it can be reused in other projects.
    The model performs survival analysis from 4 features: material, diameter, flow velocity
    and pressure.
    Required dependencies: joblib, pandas, numpy, scikit-survival, matplotlib.
"""
def __init__(self, model_path: str = "model/my_survival_forest_model_quxi.joblib"):
"""
        Initialise the analyser and load the pre-trained random survival forest model.
        :param model_path: Path to the model file (defaults to the relative path
            'model/my_survival_forest_model_quxi.joblib').
        :raises FileNotFoundError: If the model file does not exist.
        :raises Exception: If loading the model fails.
        """
        # Make sure the model directory exists
model_dir = os.path.dirname(model_path)
if model_dir and not os.path.exists(model_dir):
os.makedirs(model_dir, exist_ok=True)
if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model file not found: {model_path}")
try:
self.rsf = joblib.load(model_path)
self.features = [
"Material",
"Diameter",
"Flow Velocity",
"Pressure", # 'Temperature', 'Precipitation',
# 'Location', 'Structural Defects', 'Functional Defects'
]
except Exception as e:
            raise Exception(f"Error while loading the model: {str(e)}")
def predict_survival(self, data: pd.DataFrame) -> list:
"""
        Predict survival functions for the input data.
        :param data: pandas DataFrame containing the 4 required feature columns. Values
            should be numeric or convertible to numeric.
        :return: List of survival functions, one per sample; each object holds the time
            points (x) and survival probabilities (y).
        :raises ValueError: If required features are missing or the data format is wrong.
        """
        # Check that the required features are present
        missing_features = [feat for feat in self.features if feat not in data.columns]
        if missing_features:
            raise ValueError(f"Missing required features: {missing_features}")
        # Extract the feature columns
        try:
            x_test = data[self.features].astype(float)  # ensure numeric dtype
        except ValueError as e:
            raise ValueError(f"Failed to convert feature data, check the dtypes: {str(e)}")
        # Run the prediction
survival_functions = self.rsf.predict_survival_function(x_test)
return list(survival_functions)
def plot_survival(
self, survival_functions: list, save_path: str = None, show_plot: bool = True
):
"""
        Visualise the survival functions as a survival-probability chart.
        :param survival_functions: List of survival functions returned by predict_survival.
        :param save_path: Optional path for saving the chart (.png). If None, nothing is saved.
        :param show_plot: Whether to display the chart (in an interactive environment).
"""
plt.figure(figsize=(10, 6))
for i, sf in enumerate(survival_functions):
plt.step(sf.x, sf.y, where="post", label=f"样本 {i + 1}")
plt.xlabel("时间(年)")
plt.ylabel("生存概率")
plt.title("管道生存概率预测")
plt.legend()
plt.grid(True, alpha=0.3)
if save_path:
plt.savefig(save_path, dpi=300, bbox_inches="tight")
            print(f"Chart saved to: {save_path}")
if show_plot:
plt.show()
else:
plt.close()
# Usage notes (example)
"""
Steps for using the PipelineHealthAnalyzer class in another project:
1. Install the dependencies (add to requirements.txt):
   joblib==1.5.0
   pandas==2.2.3
   numpy==2.0.2
   scikit-survival==0.23.1
   matplotlib==3.9.4
2. Import the class:
   from pipeline_health_analyzer import PipelineHealthAnalyzer
3. Initialise the analyser (replace with the actual model path):
   analyzer = PipelineHealthAnalyzer(model_path='path/to/my_survival_forest_model3-10.joblib')
4. Prepare the data: a pandas DataFrame containing at least the 4 required feature columns
   (extra columns such as the ones below are ignored by predict_survival):
   import pandas as pd
   data = pd.DataFrame({
       'Material': [1, 2],  # sample data
       'Diameter': [100, 150],
       'Flow Velocity': [1.5, 2.0],
       'Pressure': [50, 60],
       'Temperature': [20, 25],
       'Precipitation': [0.1, 0.2],
       'Location': [1, 2],
       'Structural Defects': [0, 1],
       'Functional Defects': [0, 0]
   })
5. Run the prediction:
   survival_funcs = analyzer.predict_survival(data)
6. Inspect the results (survival probability over time for each sample):
   for i, sf in enumerate(survival_funcs):
       print(f"Sample {i+1}: time points: {sf.x[:5]}..., survival probabilities: {sf.y[:5]}...")
7. Visualise (optional):
   analyzer.plot_survival(survival_funcs, save_path='survival_plot.png')
Notes:
- The data must match the feature list and the feature values must be numeric.
- The model file has to be copied from the original project or retrained.
- To customise the features or model parameters, modify the `features` list or subclass this class.
"""