Split the online_Analysis.py file
app/services/network_import.py (new file, 197 lines)
@@ -0,0 +1,197 @@
import csv
import os

import chardet
import psycopg
from psycopg import sql

import app.services.project_info as project_info
from app.native.api.postgresql_info import get_pgconn_string
from app.services.tjnetwork import read_inp


############################################################
# network_update 10
############################################################


def network_update(file_path: str) -> None:
    """
    Update the INP file stored in the PostgreSQL database.

    :param file_path: path to the INP file
    :return: None
    """
    read_inp("szh", file_path)

    csv_path = "./history_pattern_flow.csv"

    # Earlier pandas-based implementation, kept commented out for reference
    # (it relied on pd.read_csv and a hard-coded "bb" database):
    # # Check whether the file exists
    # if os.path.exists(csv_path):
    #     print("history_patterns_flows file found, processing...")
    #
    #     # Read the CSV file
    #     df = pd.read_csv(csv_path)
    #
    #     # Connect to the PostgreSQL database (here, the database "bb")
    #     with psycopg.connect("dbname=bb host=127.0.0.1") as conn:
    #         with conn.cursor() as cur:
    #             for index, row in df.iterrows():
    #                 # Insert the data directly, without a uniqueness check
    #                 insert_sql = sql.SQL("""
    #                     INSERT INTO history_patterns_flows (id, factor, flow)
    #                     VALUES (%s, %s, %s);
    #                 """)
    #                 # Insert the row into the database
    #                 cur.execute(insert_sql, (row['id'], row['factor'], row['flow']))
    #             conn.commit()
    #     print("Data successfully imported into the 'history_patterns_flows' table.")
    # else:
    #     print("history_patterns_flows file not found.")
    # Check whether the file exists
    if os.path.exists(csv_path):
        print("history_patterns_flows file found, processing...")

        # Connect to the PostgreSQL database named after the current project
        with psycopg.connect(f"dbname={project_info.name} host=127.0.0.1") as conn:
            with conn.cursor() as cur:
                with open(csv_path, newline="", encoding="utf-8-sig") as csvfile:
                    reader = csv.DictReader(csvfile)
                    for row in reader:
                        # Insert the data directly, without a uniqueness check
                        insert_sql = sql.SQL(
                            """
                            INSERT INTO history_patterns_flows (id, factor, flow)
                            VALUES (%s, %s, %s);
                            """
                        )
                        # Insert the row into the database
                        cur.execute(insert_sql, (row["id"], row["factor"], row["flow"]))
                    conn.commit()
        print("Data successfully imported into the 'history_patterns_flows' table.")
    else:
        print("history_patterns_flows file not found.")


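A minimal usage sketch for network_update (the driver script and the INP path below are illustrative assumptions, not part of this commit; only the fixed "szh" network tag comes from the call inside the function):

    # Hypothetical driver script; "./model.inp" is a placeholder path
    from app.services.network_import import network_update

    if __name__ == "__main__":
        # Re-reads the INP model into PostgreSQL, then imports
        # ./history_pattern_flow.csv from the working directory if present
        network_update("./model.inp")
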
def submit_scada_info(name: str, coord_id: str) -> None:
    """
    Import the SCADA information table into the PostgreSQL database.

    :param name: project name (also the database name)
    :param coord_id: SRID of the coordinate system, e.g. 4326, matching the original coordinate data
    :return: None
    """
    scada_info_path = "./scada_info.csv"
    # Check whether the file exists
    if os.path.exists(scada_info_path):
        print("scada_info file found, processing...")

        # Auto-detect the file encoding
        with open(scada_info_path, "rb") as file:
            raw_data = file.read()
            detected = chardet.detect(raw_data)
            file_encoding = detected["encoding"]
            print(f"Detected file encoding: {file_encoding}")
        try:
            # Build the connection string for the target database
            conn_string = get_pgconn_string(db_name=name)

            # Connect to the PostgreSQL database
            with psycopg.connect(conn_string) as conn:
                with conn.cursor() as cur:
                    # Check whether the scada_info table is empty
                    cur.execute("SELECT COUNT(*) FROM scada_info;")
                    count = cur.fetchone()[0]

                    if count > 0:
                        print("scada_info table already contains data; clearing records...")
                        cur.execute("DELETE FROM scada_info;")
                        print("Table records cleared.")

                    with open(
                        scada_info_path, newline="", encoding=file_encoding
                    ) as csvfile:
                        reader = csv.DictReader(csvfile)
                        for row in reader:
                            # Convert empty CSV cell values to None
                            # (guard against None so a short row cannot crash .strip())
                            cleaned_row = {
                                key: (value if value and value.strip() else None)
                                for key, value in row.items()
                            }

                            # Handle the variable associated_source_outflow_id columns
                            associated_columns = [
                                f"associated_source_outflow_id{i}" for i in range(1, 21)
                            ]
                            associated_values = [
                                (
                                    cleaned_row.get(col).strip()
                                    if cleaned_row.get(col)
                                    and cleaned_row.get(col).strip()
                                    else None
                                )
                                for col in associated_columns
                            ]

                            # Convert X_coor and Y_coor into a geometry value
                            x_coor = (
                                float(cleaned_row["X_coor"])
                                if cleaned_row["X_coor"]
                                else None
                            )
                            y_coor = (
                                float(cleaned_row["Y_coor"])
                                if cleaned_row["Y_coor"]
                                else None
                            )
                            # Explicit None checks so a legitimate 0.0 coordinate is kept
                            coord = (
                                f"SRID={coord_id};POINT({x_coor} {y_coor})"
                                if x_coor is not None and y_coor is not None
                                else None
                            )

                            # Prepare the INSERT statement, expanding the dynamic column list
                            insert_sql = sql.SQL(
                                """
                                INSERT INTO scada_info (
                                    id, type, associated_element_id, associated_pattern,
                                    associated_pipe_flow_id, {associated_columns},
                                    API_query_id, transmission_mode, transmission_frequency,
                                    reliability, X_coor, Y_coor, coord
                                )
                                VALUES (
                                    %s, %s, %s, %s, %s, {associated_placeholders},
                                    %s, %s, %s, %s, %s, %s, %s
                                );
                                """
                            ).format(
                                associated_columns=sql.SQL(", ").join(
                                    sql.Identifier(col) for col in associated_columns
                                ),
                                associated_placeholders=sql.SQL(", ").join(
                                    sql.Placeholder() for _ in associated_columns
                                ),
                            )
                            # Insert the row into the database
                            cur.execute(
                                insert_sql,
                                (
                                    cleaned_row["id"],
                                    cleaned_row["type"],
                                    cleaned_row["associated_element_id"],
                                    cleaned_row.get("associated_pattern"),
                                    cleaned_row.get("associated_pipe_flow_id"),
                                    *associated_values,
                                    cleaned_row.get("API_query_id"),
                                    cleaned_row["transmission_mode"],
                                    cleaned_row["transmission_frequency"],
                                    cleaned_row["reliability"],
                                    x_coor,
                                    y_coor,
                                    coord,
                                ),
                            )
                        conn.commit()
            print("Data successfully imported into the 'scada_info' table.")
        except Exception as e:
            print(f"Import failed: {e}")
    else:
        print("scada_info file not found.")
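For completeness, a similarly hedged sketch of calling submit_scada_info (the project name and SRID are placeholders; 4326 is just the WGS84 example given in the docstring):

    # Hypothetical call; "szh" and "4326" are illustrative values
    from app.services.network_import import submit_scada_info

    # Imports ./scada_info.csv into the "szh" database, storing
    # X_coor/Y_coor points as SRID 4326 geometries
    submit_scada_info("szh", "4326")

Composing the INSERT with psycopg's sql.Identifier and sql.Placeholder, rather than plain string formatting, keeps the twenty associated_source_outflow_id columns safely quoted and guarantees the placeholder count always matches the column count.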