SCADA 压力流量清洗模块新增数据填补
This commit is contained in:
@@ -6,15 +6,107 @@ from sklearn.impute import SimpleImputer
|
||||
import os
|
||||
|
||||
|
||||
def clean_pressure_data_km(input_csv_path: str, show_plot: bool = False) -> str:
|
||||
def fill_time_gaps(
    data: pd.DataFrame,
    time_col: str = "time",
    freq: str = "1min",
    short_gap_threshold: int = 10,
) -> pd.DataFrame:
    """Fill in missing timestamps and repair the data gaps they leave.

    The time column is parsed as UTC, the frame is reindexed onto a complete
    ``date_range`` at ``freq``, and each column's runs of NaN are repaired:
    runs of length <= ``short_gap_threshold`` are linearly interpolated,
    longer runs are forward-filled.

    Args:
        data: DataFrame containing a time column.
        time_col: Name of the time column (default ``"time"``).
        freq: Resampling frequency (default ``"1min"``).
        short_gap_threshold: Gap-length cutoff in rows; gaps of at most this
            many rows get linear interpolation, longer gaps get ffill.

    Returns:
        DataFrame reindexed onto the full time range, with the time column
        restored as ISO-8601 strings (``YYYY-MM-DDTHH:MM:SS+00:00``).

    Raises:
        ValueError: If ``time_col`` is not present in ``data``.
    """
    if time_col not in data.columns:
        raise ValueError(f"时间列 '{time_col}' 不存在于数据中")

    # Parse the time column (forced to UTC) and use it as the index.
    data = data.copy()
    data[time_col] = pd.to_datetime(data[time_col], utc=True)
    data_indexed = data.set_index(time_col)

    # Build the complete timestamp range and reindex onto it; rows for
    # previously missing timestamps appear as all-NaN.
    full_range = pd.date_range(
        start=data_indexed.index.min(), end=data_indexed.index.max(), freq=freq
    )
    data_reindexed = data_indexed.reindex(full_range)

    # Repair gaps column by column.
    for col in data_reindexed.columns:
        # Locate missing values.
        is_missing = data_reindexed[col].isna()

        # Label each run of consecutive missing/non-missing values, then
        # broadcast each run's length so every row knows the size of the
        # gap it belongs to.
        missing_groups = (is_missing != is_missing.shift()).cumsum()
        gap_lengths = is_missing.groupby(missing_groups).transform("sum")

        # Short gaps: linear interpolation (limit_area="inside" leaves
        # leading/trailing NaNs untouched).
        short_gap_mask = is_missing & (gap_lengths <= short_gap_threshold)
        if short_gap_mask.any():
            data_reindexed.loc[short_gap_mask, col] = (
                data_reindexed[col]
                .interpolate(method="linear", limit_area="inside")
                .loc[short_gap_mask]
            )

        # Long gaps: carry the last known value forward.
        long_gap_mask = is_missing & (gap_lengths > short_gap_threshold)
        if long_gap_mask.any():
            data_reindexed.loc[long_gap_mask, col] = (
                data_reindexed[col].ffill().loc[long_gap_mask]
            )

    # Restore the time column from the index (keeping the original column
    # name and string format).
    data_result = data_reindexed.reset_index()
    data_result.rename(columns={"index": time_col}, inplace=True)

    # Serialize with timezone info; strftime's %z emits "+0000", so insert
    # a colon to produce ISO-8601 "+00:00".
    data_result[time_col] = data_result[time_col].dt.strftime("%Y-%m-%dT%H:%M:%S%z")
    # FIX: match negative offsets too — the original pattern only handled
    # "+", so a "-0500"-style offset would have been left unnormalized.
    data_result[time_col] = data_result[time_col].str.replace(
        r"([+-]\d{2})(\d{2})$", r"\1:\2", regex=True
    )

    return data_result
|
||||
|
||||
|
||||
def clean_pressure_data_km(
|
||||
input_csv_path: str, show_plot: bool = False, fill_gaps: bool = True
|
||||
) -> str:
|
||||
"""
|
||||
读取输入 CSV,基于 KMeans 检测异常并用滚动平均修复。输出为 <input_basename>_cleaned.xlsx(同目录)。
|
||||
原始数据在 sheet 'raw_pressure_data',处理后数据在 sheet 'cleaned_pressusre_data'。
|
||||
返回输出文件的绝对路径。
|
||||
|
||||
Args:
|
||||
input_csv_path: CSV 文件路径
|
||||
show_plot: 是否显示可视化
|
||||
fill_gaps: 是否先补齐时间缺口(默认 True)
|
||||
"""
|
||||
# 读取 CSV
|
||||
input_csv_path = os.path.abspath(input_csv_path)
|
||||
data = pd.read_csv(input_csv_path, header=0, index_col=None, encoding="utf-8")
|
||||
|
||||
# 补齐时间缺口(如果数据包含 time 列)
|
||||
if fill_gaps and "time" in data.columns:
|
||||
data = fill_time_gaps(
|
||||
data, time_col="time", freq="1min", short_gap_threshold=10
|
||||
)
|
||||
|
||||
# 分离时间列和数值列
|
||||
time_col_data = None
|
||||
if "time" in data.columns:
|
||||
time_col_data = data["time"]
|
||||
data = data.drop(columns=["time"])
|
||||
# 标准化
|
||||
data_norm = (data - data.mean()) / data.std()
|
||||
|
||||
@@ -86,11 +178,20 @@ def clean_pressure_data_km(input_csv_path: str, show_plot: bool = False) -> str:
|
||||
output_filename = f"{input_base}_cleaned.xlsx"
|
||||
output_path = os.path.join(input_dir, output_filename)
|
||||
|
||||
# 如果原始数据包含时间列,将其添加回结果
|
||||
data_for_save = data.copy()
|
||||
data_repaired_for_save = data_repaired.copy()
|
||||
if time_col_data is not None:
|
||||
data_for_save.insert(0, "time", time_col_data)
|
||||
data_repaired_for_save.insert(0, "time", time_col_data)
|
||||
|
||||
if os.path.exists(output_path):
|
||||
os.remove(output_path) # 覆盖同名文件
|
||||
with pd.ExcelWriter(output_path, engine="openpyxl") as writer:
|
||||
data.to_excel(writer, sheet_name="raw_pressure_data", index=False)
|
||||
data_repaired.to_excel(writer, sheet_name="cleaned_pressusre_data", index=False)
|
||||
data_for_save.to_excel(writer, sheet_name="raw_pressure_data", index=False)
|
||||
data_repaired_for_save.to_excel(
|
||||
writer, sheet_name="cleaned_pressusre_data", index=False
|
||||
)
|
||||
|
||||
# 返回输出文件的绝对路径
|
||||
return os.path.abspath(output_path)
|
||||
@@ -100,17 +201,20 @@ def clean_pressure_data_df_km(data: pd.DataFrame, show_plot: bool = False) -> di
|
||||
"""
|
||||
接收一个 DataFrame 数据结构,使用KMeans聚类检测异常并用滚动平均修复。
|
||||
返回清洗后的字典数据结构。
|
||||
|
||||
Args:
|
||||
data: 输入 DataFrame(可包含 time 列)
|
||||
show_plot: 是否显示可视化
|
||||
"""
|
||||
# 使用传入的 DataFrame
|
||||
data = data.copy()
|
||||
# 填充NaN值
|
||||
data = data.ffill().bfill()
|
||||
# 异常值预处理
|
||||
# 将0值替换为NaN,然后用线性插值填充
|
||||
data_filled = data.replace(0, np.nan)
|
||||
data_filled = data_filled.interpolate(method="linear", limit_direction="both")
|
||||
# 如果仍有NaN(全为0的列),用前后值填充
|
||||
data_filled = data_filled.ffill().bfill()
|
||||
|
||||
# 补齐时间缺口(如果启用且数据包含 time 列)
|
||||
data_filled = fill_time_gaps(
|
||||
data, time_col="time", freq="1min", short_gap_threshold=10
|
||||
)
|
||||
# 移除 time 列用于后续清洗
|
||||
data_filled = data_filled.drop(columns=["time"])
|
||||
|
||||
# 标准化(使用填充后的数据)
|
||||
data_norm = (data_filled - data_filled.mean()) / data_filled.std()
|
||||
|
||||
Reference in New Issue
Block a user