fix bugs and refine, end of 2025
.env (new file, 11 lines)
@@ -0,0 +1,11 @@
+DB_NAME=szh
+DB_HOST=127.0.0.1
+DB_PORT=5433
+DB_USER=tjwater
+DB_PASSWORD=Tjwater@123456
+
+TIMESCALEDB_DB_NAME=szh
+TIMESCALEDB_DB_HOST=127.0.0.1
+TIMESCALEDB_DB_PORT=5435
+TIMESCALEDB_DB_USER=tjwater
+TIMESCALEDB_DB_PASSWORD=Tjwater@123456
@@ -1,6 +1,6 @@
-from .project import list_project, have_project, create_project, delete_project, clean_project
-from .project import is_project_open, open_project, close_project
-from .project import copy_project
+from .project_backup import list_project, have_project, create_project, delete_project, clean_project
+from .project_backup import is_project_open, open_project, close_project
+from .project_backup import copy_project
 
 #DingZQ, 2024-12-28, convert inp v3 to v2
 from .inp_in import read_inp, import_inp, convert_inp_v3_to_v2
@@ -1,6 +1,6 @@
 import datetime
 import os
-from .project import *
+from .project_backup import *
 from .database import ChangeSet, write
 from .sections import *
 from .s0_base import get_region_type
@@ -1,5 +1,5 @@
 import os
-from .project import *
+from .project_backup import *
 from .database import ChangeSet
 from .sections import *
 from .s1_title import inp_out_title
api/postgresql_info.py (new file, 36 lines)
@@ -0,0 +1,36 @@
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+pg_name = os.getenv("DB_NAME")
+pg_host = os.getenv("DB_HOST")
+pg_port = os.getenv("DB_PORT")
+pg_user = os.getenv("DB_USER")
+pg_password = os.getenv("DB_PASSWORD")
+
+
+def get_pgconn_string(
+    db_name=pg_name,
+    db_host=pg_host,
+    db_port=pg_port,
+    db_user=pg_user,
+    db_password=pg_password,
+):
+    """Return a PostgreSQL connection string."""
+    return f"dbname={db_name} host={db_host} port={db_port} user={db_user} password={db_password}"
+
+
+def get_pg_config():
+    """Return a dict of the PostgreSQL configuration variables."""
+    return {
+        "name": pg_name,
+        "host": pg_host,
+        "port": pg_port,
+        "user": pg_user,
+    }
+
+
+def get_pg_password():
+    """Return the password (use with caution)."""
+    return pg_password
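For reference, a minimal sketch of how these helpers are consumed elsewhere in this commit (psycopg v3; the query here is illustrative, not from the diff):

    import psycopg as pg
    from api.postgresql_info import get_pgconn_string

    # Credentials come from .env via load_dotenv(); override only the database name.
    with pg.connect(conninfo=get_pgconn_string(db_name="postgres"), autocommit=True) as conn:
        with conn.cursor() as cur:
            cur.execute("select version()")
            print(cur.fetchone())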
api/project.py (118 changed lines)
@@ -2,33 +2,37 @@ import os
 import psycopg as pg
 from psycopg.rows import dict_row
 from .connection import g_conn_dict as conn
+from .postgresql_info import get_pgconn_string, get_pg_config, get_pg_password
 
 # no undo/redo
 
-_server_databases = ['template0', 'template1', 'postgres', 'project']
+_server_databases = ["template0", "template1", "postgres", "project"]
 
 
 def list_project() -> list[str]:
     ps = []
-    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
         with conn.cursor(row_factory=dict_row) as cur:
-            for p in cur.execute(f"select datname from pg_database where datname <> 'postgres' and datname <> 'template0' and datname <> 'template1' and datname <> 'project'"):
-                ps.append(p['datname'])
+            for p in cur.execute(
+                f"select datname from pg_database where datname <> 'postgres' and datname <> 'template0' and datname <> 'template1' and datname <> 'project'"
+            ):
+                ps.append(p["datname"])
     return ps
 
 
 def have_project(name: str) -> bool:
-    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
         with conn.cursor() as cur:
             cur.execute(f"select * from pg_database where datname = '{name}'")
             return cur.rowcount > 0
 
 
 def copy_project(source: str, new: str) -> None:
-    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
         with conn.cursor() as cur:
             cur.execute(f'create database "{new}" with template = {source}')
 
 
 # 2025-02-07, WMH
 # copy_project copies the full contents of the pg `operation` table as well; after about a week of real use that table grows very large and makes CopyProject slow. CopyProjectEx skips the operation tables during the copy to save time.
 class CopyProjectEx:
@@ -39,105 +43,116 @@ class CopyProjectEx:
         connection.commit()
 
     @staticmethod
-    def execute_pg_dump(hostname, source_db, exclude_table_list):
-        dump_command_structure = (
-            f'pg_dump -h {hostname} -F c -s -f source_db_structure.dump {source_db}'
-        )
+    def execute_pg_dump(source_db, exclude_table_list):
+
+        os.environ["PGPASSWORD"] = get_pg_password()  # set the password environment variable
+        pg_config = get_pg_config()
+        host = pg_config["host"]
+        port = pg_config["port"]
+        user = pg_config["user"]
+        dump_command_structure = f"pg_dump -h {host} -p {port} -U {user} -F c -s -f source_db_structure.dump {source_db}"
         os.system(dump_command_structure)
 
         if exclude_table_list is not None:
-            exclude_table = ' '.join(['-T {}'.format(i) for i in exclude_table_list])
-            dump_command_db = (
-                f'pg_dump -h {hostname} -F c -a {exclude_table} -f source_db.dump {source_db}'
-            )
+            exclude_table = " ".join(["-T {}".format(i) for i in exclude_table_list])
+            dump_command_db = f"pg_dump -h {host} -p {port} -U {user} -F c -a {exclude_table} -f source_db.dump {source_db}"
         else:
-            dump_command_db = (
-                f'pg_dump -h {hostname} -F c -a -f source_db.dump {source_db}'
-            )
+            dump_command_db = f"pg_dump -h {host} -p {port} -U {user} -F c -a -f source_db.dump {source_db}"
         os.system(dump_command_db)
 
     @staticmethod
-    def execute_pg_restore(hostname, new_db):
-        restore_command_structure = (
-            f'pg_restore -h {hostname} -d {new_db} source_db_structure.dump'
-        )
+    def execute_pg_restore(new_db):
+        os.environ["PGPASSWORD"] = get_pg_password()  # set the password environment variable
+        pg_config = get_pg_config()
+        host = pg_config["host"]
+        port = pg_config["port"]
+        user = pg_config["user"]
+        restore_command_structure = f"pg_restore -h {host} -p {port} -U {user} -d {new_db} source_db_structure.dump"
        os.system(restore_command_structure)
 
         restore_command_db = (
-            f'pg_restore -h {hostname} -d {new_db} source_db.dump'
+            f"pg_restore -h {host} -p {port} -U {user} -d {new_db} source_db.dump"
         )
         os.system(restore_command_db)
 
     @staticmethod
     def init_operation_table(connection, excluded_table):
         with connection.cursor() as cursor:
-            if 'operation' in excluded_table:
-                insert_query \
-                    = "insert into operation (id, redo, undo, redo_cs, undo_cs) values (0, '', '', '', '')"
+            if "operation" in excluded_table:
+                insert_query = "insert into operation (id, redo, undo, redo_cs, undo_cs) values (0, '', '', '', '')"
                 cursor.execute(insert_query)
 
-            if 'current_operation' in excluded_table:
-                insert_query \
-                    = "insert into current_operation (id) values (0)"
+            if "current_operation" in excluded_table:
+                insert_query = "insert into current_operation (id) values (0)"
                 cursor.execute(insert_query)
 
-            if 'restore_operation' in excluded_table:
-                insert_query \
-                    = "insert into restore_operation (id) values (0)"
+            if "restore_operation" in excluded_table:
+                insert_query = "insert into restore_operation (id) values (0)"
                 cursor.execute(insert_query)
 
-            if 'batch_operation' in excluded_table:
-                insert_query \
-                    = "insert into batch_operation (id, redo, undo, redo_cs, undo_cs) values (0, '', '', '', '')"
+            if "batch_operation" in excluded_table:
+                insert_query = "insert into batch_operation (id, redo, undo, redo_cs, undo_cs) values (0, '', '', '', '')"
                 cursor.execute(insert_query)
 
-            if 'operation_table' in excluded_table:
-                insert_query \
-                    = "insert into operation_table (option) values ('operation')"
+            if "operation_table" in excluded_table:
+                insert_query = (
+                    "insert into operation_table (option) values ('operation')"
+                )
                 cursor.execute(insert_query)
         connection.commit()
 
-    def __call__(self, source: str, new: str, excluded_table: [str] = None) -> None:
-        connection = pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True)
+    def __call__(self, source: str, new_db: str, excluded_tables: [str] = None) -> None:
+        source_connection = pg.connect(conninfo=get_pgconn_string(), autocommit=True)
 
-        self.create_database(connection, new)
-        self.execute_pg_dump('127.0.0.1', source, excluded_table)
-        self.execute_pg_restore('127.0.0.1', new)
+        self.create_database(source_connection, new_db)
 
-        connection = pg.connect(conninfo=f"dbname='{new}' host=127.0.0.1", autocommit=True)
-        self.init_operation_table(connection, excluded_table)
+        self.execute_pg_dump(source, excluded_tables)
+        self.execute_pg_restore(new_db)
+        source_connection.close()
+
+        new_db_connection = pg.connect(
+            conninfo=get_pgconn_string(db_name=new_db), autocommit=True
+        )
+        self.init_operation_table(new_db_connection, excluded_tables)
+        new_db_connection.close()
 
 
 def create_project(name: str) -> None:
-    return copy_project('project', name)
+    return copy_project("project", name)
 
 
 def delete_project(name: str) -> None:
-    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
         with conn.cursor() as cur:
-            cur.execute(f"select pg_terminate_backend(pid) from pg_stat_activity where datname = '{name}'")
+            cur.execute(
+                f"select pg_terminate_backend(pid) from pg_stat_activity where datname = '{name}'"
+            )
             cur.execute(f'drop database "{name}"')
 
 
 def clean_project(excluded: list[str] = []) -> None:
     projects = list_project()
-    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
         with conn.cursor(row_factory=dict_row) as cur:
             row = cur.execute(f"select current_database()").fetchone()
             if row != None:
-                current_db = row['current_database']
+                current_db = row["current_database"]
                 if current_db in projects:
                     projects.remove(current_db)
             for project in projects:
                 if project in _server_databases or project in excluded:
                     continue
-                cur.execute(f"select pg_terminate_backend(pid) from pg_stat_activity where datname = '{project}'")
+                cur.execute(
+                    f"select pg_terminate_backend(pid) from pg_stat_activity where datname = '{project}'"
+                )
                 cur.execute(f'drop database "{project}"')
 
 
 def open_project(name: str) -> None:
     if name not in conn:
-        conn[name] = pg.connect(conninfo=f"dbname={name} host=127.0.0.1", autocommit=True)
+        conn[name] = pg.connect(
+            conninfo=get_pgconn_string(db_name=name), autocommit=True
+        )
 
 
 def is_project_open(name: str) -> bool:
@@ -148,4 +163,3 @@ def close_project(name: str) -> None:
     if name in conn:
         conn[name].close()
         del conn[name]
-
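For reference, a minimal sketch of the new CopyProjectEx call pattern (the database names szh and szh_tmp are illustrative, not from this commit):

    from api.project import CopyProjectEx

    # Clone `szh` into `szh_tmp`, skipping the bulky undo/redo history tables;
    # init_operation_table then re-seeds the skipped tables with their initial rows.
    copy = CopyProjectEx()
    copy("szh", "szh_tmp", ["operation", "current_operation", "restore_operation",
                            "batch_operation", "operation_table"])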
api/project_backup.py (new file, 152 lines)
@@ -0,0 +1,152 @@
+import os
+import psycopg as pg
+from psycopg.rows import dict_row
+from .connection import g_conn_dict as conn
+from .postgresql_info import get_pgconn_string
+
+# no undo/redo
+
+_server_databases = ['template0', 'template1', 'postgres', 'project']
+
+
+def list_project() -> list[str]:
+    ps = []
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
+        with conn.cursor(row_factory=dict_row) as cur:
+            for p in cur.execute(f"select datname from pg_database where datname <> 'postgres' and datname <> 'template0' and datname <> 'template1' and datname <> 'project'"):
+                ps.append(p['datname'])
+    return ps
+
+
+def have_project(name: str) -> bool:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
+        with conn.cursor() as cur:
+            cur.execute(f"select * from pg_database where datname = '{name}'")
+            return cur.rowcount > 0
+
+
+def copy_project(source: str, new: str) -> None:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
+        with conn.cursor() as cur:
+            cur.execute(f'create database "{new}" with template = {source}')
+
+
+# 2025-02-07, WMH
+# copy_project copies the full contents of the pg `operation` table as well; after about a week of real use that table grows very large and makes CopyProject slow. CopyProjectEx skips the operation tables during the copy to save time.
+class CopyProjectEx:
+    @staticmethod
+    def create_database(connection, new_db):
+        with connection.cursor() as cursor:
+            cursor.execute(f'create database "{new_db}"')
+        connection.commit()
+
+    @staticmethod
+    def execute_pg_dump(hostname, source_db, exclude_table_list):
+        dump_command_structure = (
+            f'pg_dump -h {hostname} -F c -s -f source_db_structure.dump {source_db}'
+        )
+        os.system(dump_command_structure)
+
+        if exclude_table_list is not None:
+            exclude_table = ' '.join(['-T {}'.format(i) for i in exclude_table_list])
+            dump_command_db = (
+                f'pg_dump -h {hostname} -F c -a {exclude_table} -f source_db.dump {source_db}'
+            )
+        else:
+            dump_command_db = (
+                f'pg_dump -h {hostname} -F c -a -f source_db.dump {source_db}'
+            )
+        os.system(dump_command_db)
+
+    @staticmethod
+    def execute_pg_restore(hostname, new_db):
+        restore_command_structure = (
+            f'pg_restore -h {hostname} -d {new_db} source_db_structure.dump'
+        )
+        os.system(restore_command_structure)
+
+        restore_command_db = (
+            f'pg_restore -h {hostname} -d {new_db} source_db.dump'
+        )
+        os.system(restore_command_db)
+
+    @staticmethod
+    def init_operation_table(connection, excluded_table):
+        with connection.cursor() as cursor:
+            if 'operation' in excluded_table:
+                insert_query = "insert into operation (id, redo, undo, redo_cs, undo_cs) values (0, '', '', '', '')"
+                cursor.execute(insert_query)
+
+            if 'current_operation' in excluded_table:
+                insert_query = "insert into current_operation (id) values (0)"
+                cursor.execute(insert_query)
+
+            if 'restore_operation' in excluded_table:
+                insert_query = "insert into restore_operation (id) values (0)"
+                cursor.execute(insert_query)
+
+            if 'batch_operation' in excluded_table:
+                insert_query = "insert into batch_operation (id, redo, undo, redo_cs, undo_cs) values (0, '', '', '', '')"
+                cursor.execute(insert_query)
+
+            if 'operation_table' in excluded_table:
+                insert_query = "insert into operation_table (option) values ('operation')"
+                cursor.execute(insert_query)
+        connection.commit()
+
+    def __call__(self, source: str, new: str, excluded_table: [str] = None) -> None:
+        connection = pg.connect(conninfo=get_pgconn_string(), autocommit=True)
+
+        self.create_database(connection, new)
+        self.execute_pg_dump('127.0.0.1', source, excluded_table)
+        self.execute_pg_restore('127.0.0.1', new)
+
+        connection = pg.connect(conninfo=get_pgconn_string(db_name=new), autocommit=True)
+        self.init_operation_table(connection, excluded_table)
+
+
+def create_project(name: str) -> None:
+    return copy_project('project', name)
+
+
+def delete_project(name: str) -> None:
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
+        with conn.cursor() as cur:
+            cur.execute(f"select pg_terminate_backend(pid) from pg_stat_activity where datname = '{name}'")
+            cur.execute(f'drop database "{name}"')
+
+
+def clean_project(excluded: list[str] = []) -> None:
+    projects = list_project()
+    with pg.connect(conninfo=get_pgconn_string(), autocommit=True) as conn:
+        with conn.cursor(row_factory=dict_row) as cur:
+            row = cur.execute(f"select current_database()").fetchone()
+            if row != None:
+                current_db = row['current_database']
+                if current_db in projects:
+                    projects.remove(current_db)
+            for project in projects:
+                if project in _server_databases or project in excluded:
+                    continue
+                cur.execute(f"select pg_terminate_backend(pid) from pg_stat_activity where datname = '{project}'")
+                cur.execute(f'drop database "{project}"')
+
+
+def open_project(name: str) -> None:
+    if name not in conn:
+        conn[name] = pg.connect(conninfo=get_pgconn_string(db_name=name), autocommit=True)
+
+
+def is_project_open(name: str) -> bool:
+    return name in conn
+
+
+def close_project(name: str) -> None:
+    if name in conn:
+        conn[name].close()
+        del conn[name]
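A minimal usage sketch of the backup module's project lifecycle (the name demo_2025 is hypothetical):

    from api.project_backup import create_project, list_project, delete_project

    create_project("demo_2025")   # clone the 'project' template database
    print(list_project())         # all project databases on this server
    delete_project("demo_2025")   # terminate its sessions, then drop it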
@@ -1,6 +1,6 @@
 import os
 import ctypes
-from .project import have_project
+from .project_backup import have_project
 from .inp_out import dump_inp
 
 def calculate_service_area(name: str) -> list[dict[str, list[str]]]:
api_ex/burst_locate_SCADA.py (new file, 2408 lines; diff suppressed because it is too large)
api_ex/kmeans_sensor.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+import wntr
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+import sklearn.cluster
+import os
+
+
+class QD_KMeans(object):
+    def __init__(self, wn, num_monitors):
+        # self.inp = inp
+        self.cluster_num = num_monitors  # number of cluster centers, i.e. the number of pressure-monitoring points
+        self.wn = wn
+        self.monitor_nodes = []
+        self.coords = []
+        self.junction_nodes = {}  # Added missing initialization
+
+    def get_junctions_coordinates(self):
+        for junction_name in self.wn.junction_name_list:
+            junction = self.wn.get_node(junction_name)
+            self.junction_nodes[junction_name] = junction.coordinates
+            self.coords.append(junction.coordinates)
+        # print(f"Total junctions: {self.junction_coordinates}")
+
+    def select_monitoring_points(self):
+        if not self.coords:  # Add check if coordinates are collected
+            self.get_junctions_coordinates()
+        coords = np.array(self.coords)
+        coords_normalized = (coords - coords.min(axis=0)) / (coords.max(axis=0) - coords.min(axis=0))
+        kmeans = sklearn.cluster.KMeans(n_clusters=self.cluster_num, random_state=42)
+        kmeans.fit(coords_normalized)
+
+        for center in kmeans.cluster_centers_:
+            distances = np.sum((coords_normalized - center) ** 2, axis=1)
+            nearest_node = self.wn.junction_name_list[np.argmin(distances)]
+            self.monitor_nodes.append(nearest_node)
+
+        return self.monitor_nodes
+
+    def visualize_network(self):
+        """Visualize network with monitoring points"""
+        ax = wntr.graphics.plot_network(self.wn,
+                                        node_attribute=self.monitor_nodes,
+                                        node_size=30,
+                                        title='Optimal sensor')
+        plt.show()
+
+
+def kmeans_sensor_placement(name: str, sensor_num: int, min_diameter: int) -> list:
+    inp_name = f'./db_inp/{name}.db.inp'
+    wn = wntr.network.WaterNetworkModel(inp_name)
+    wn_cluster = QD_KMeans(wn, sensor_num)
+
+    # Select monitoring points
+    sensor_ids = wn_cluster.select_monitoring_points()
+
+    # wn_cluster.visualize_network()
+
+    return sensor_ids
+
+
+if __name__ == "__main__":
+    #sensorindex = get_ID(name='suzhouhe_2024_cloud_0817', sensor_num=30, min_diameter=500)
+    sensorindex = kmeans_sensor_placement(name='szh', sensor_num=50, min_diameter=300)
+    print(sensorindex)
build_pyd_singelfile.py (new file, 6 lines)
@@ -0,0 +1,6 @@
+from distutils.core import setup
+from Cython.Build import cythonize
+
+setup(ext_modules=cythonize([
+    "api/project.py"
+]))
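For reference, the usual distutils invocation to compile the listed module in place (assumes Cython and a C compiler are installed):

    python build_pyd_singelfile.py build_ext --inplace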
epanet/apply_valve_renames.py (new file, 85 lines)
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import re
+
+inp = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii.inp")
+out = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii-fixed2.inp")
+mapout = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii-fixed2.mapping.txt")
+
+text = inp.read_text(encoding='utf-8')
+lines = text.splitlines()
+
+# find [VALVES] start and end
+start = None
+for i, l in enumerate(lines):
+    if l.strip().upper() == '[VALVES]':
+        start = i
+        break
+if start is None:
+    print('No [VALVES] section found')
+    raise SystemExit(1)
+end = len(lines)
+for j in range(start + 1, len(lines)):
+    if re.match(r"^\s*\[.+\]", lines[j]):
+        end = j
+        break
+
+# collect valve lines with their absolute numbers
+valve_entries = []  # (absolute_line_index, token, line)
+for idx in range(start + 1, end):
+    l = lines[idx]
+    if not l.strip() or l.strip().startswith(';'):
+        continue
+    tok = l.split()[0]
+    valve_entries.append((idx, tok, l))
+
+from collections import defaultdict
+positions = defaultdict(list)
+for ln, tok, l in valve_entries:
+    positions[tok].append(ln)
+
+# find duplicates
+dups = {tok: lns for tok, lns in positions.items() if len(lns) > 1}
+print('Found', sum(1 for _ in valve_entries), 'valve entries; duplicates:', len(dups))
+
+replacements = []  # (line_index, old, new)
+counter = 1
+for tok, lns in dups.items():
+    # skip first occurrence, rename others
+    for occ_index, ln in enumerate(lns):
+        if occ_index == 0:
+            continue
+        # produce new name: prefix V if starts with digit
+        if re.fullmatch(r"\d+", tok) or re.match(r"^\d", tok):
+            base = 'V' + tok
+        else:
+            base = tok
+        new = f'{base}_{occ_index}'
+        # ensure uniqueness globally
+        while any(rn == new for _, _, rn in replacements) or any(new == t for t in positions.keys()):
+            counter += 1
+            new = f'{base}_{occ_index}_{counter}'
+        replacements.append((ln, tok, new))
+
+# Apply replacements on the given absolute lines
+for ln, old, new in replacements:
+    line = lines[ln]
+    # replace only first token occurrence
+    parts = line.split()
+    if parts:
+        # find start of token in line (preserve spacing)
+        m = re.search(re.escape(parts[0]), line)
+        if m:
+            startpos = m.start()
+            endpos = m.end()
+            newline = line[:startpos] + new + line[endpos:]
+            lines[ln] = newline
+
+# write new file
+out.write_text('\n'.join(lines) + '\n', encoding='utf-8')
+# write mapping
+with mapout.open('w', encoding='utf-8') as f:
+    for ln, old, new in replacements:
+        f.write(f'line {ln+1}: {old} -> {new}\n')
+
+print('Wrote', out, 'with', len(replacements), 'replacements; mapping at', mapout)
@@ -9,7 +9,7 @@ import subprocess
 import logging
 from typing import Any
 sys.path.append("..")
-from api import project
+from api import project_backup
 from api import inp_out
 
 
@@ -243,7 +243,7 @@ def dump_output_binary(path: str) -> str:
 
 #DingZQ, 2025-02-04, return dict[str, Any]
 def run_project_return_dict(name: str, readable_output: bool = False) -> dict[str, Any]:
-    if not project.have_project(name):
+    if not project_backup.have_project(name):
         raise Exception(f'Not found project [{name}]')
 
     dir = os.path.abspath(os.getcwd())
@@ -276,7 +276,7 @@ def run_project_return_dict(name: str, readable_output: bool = False) -> dict[st
 
 # original code
 def run_project(name: str, readable_output: bool = False) -> str:
-    if not project.have_project(name):
+    if not project_backup.have_project(name):
         raise Exception(f'Not found project [{name}]')
 
     dir = os.path.abspath(os.getcwd())
Binary file not shown.
epanet/fix_inp_nonascii.py (new file, 64 lines)
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+"""
+Fix non-ASCII ID tokens in an EPANET .inp file by mapping each unique non-ASCII-containing token
+to an ASCII-safe name. Outputs a new INP and a mapping file for review.
+Usage: python fix_inp_nonascii.py input.inp [output.inp]
+"""
+import re
+import sys
+from pathlib import Path
+
+if len(sys.argv) < 2:
+    print("Usage: python fix_inp_nonascii.py input.inp [output.inp]")
+    sys.exit(2)
+
+src = Path(sys.argv[1])
+if len(sys.argv) > 2:
+    dst = Path(sys.argv[2])
+else:
+    dst = src.with_name(src.stem + '-ascii' + src.suffix)
+
+text = src.read_text(encoding='utf-8')
+# Find tokens that contain at least one non-ASCII char. Token = contiguous non-whitespace sequence
+nonascii_tokens = set(re.findall(r"\S*[^\x00-\x7F]\S*", text))
+if not nonascii_tokens:
+    print("No non-ASCII tokens found. Copying source to destination unchanged.")
+    dst.write_text(text, encoding='utf-8')
+    sys.exit(0)
+
+used = set()
+mapping = {}
+counter = 1
+# Sort tokens to get deterministic output
+for t in sorted(nonascii_tokens):
+    # build ASCII prefix from characters that are safe (alnum, underscore, hyphen)
+    prefix = ''.join(ch for ch in t if ord(ch) < 128 and (ch.isalnum() or ch in '_-'))
+    if not prefix:
+        prefix = 'ID'
+    candidate = prefix
+    # ensure candidate is unique and not equal to original token
+    while candidate in used:
+        candidate = f"{prefix}_x{counter}"
+        counter += 1
+    # if candidate accidentally equals the original token (rare), force suffix
+    if candidate == t:
+        candidate = f"{prefix}_x{counter}"
+        counter += 1
+    mapping[t] = candidate
+    used.add(candidate)
+
+# Replace occurrences using the escaped token as a plain pattern. Note: no word-boundary
+# lookarounds are used, so a token that is a substring of a longer token would also match.
+new_text = text
+for src_token, dst_token in mapping.items():
+    pattern = re.escape(src_token)
+    new_text = re.sub(pattern, dst_token, new_text)
+
+# Write output files
+dst.write_text(new_text, encoding='utf-8')
+mapfile = dst.with_suffix(dst.suffix + '.mapping.txt')
+with mapfile.open('w', encoding='utf-8') as f:
+    for k, v in mapping.items():
+        f.write(f"{k} -> {v}\n")
+
+print(f"Wrote: {dst}\nMapping: {mapfile}\nReplaced {len(mapping)} non-ASCII tokens.")
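A quick sketch of what the token regex captures (the sample IDs are made up):

    import re

    text = "JUNCTION 阀门12 PIPE_A 泵站-3"
    # Each match is a whitespace-delimited token containing at least one non-ASCII character.
    print(re.findall(r"\S*[^\x00-\x7F]\S*", text))  # ['阀门12', '泵站-3']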
epanet/fix_valve_ids.py (new file, 144 lines)
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+import re
+from pathlib import Path
+
+inp = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii.inp")
+mapf = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii.inp.mapping.txt")
+out = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii-fixed.inp")
+outmap = out.with_suffix(out.suffix + '.mapping.txt')
+
+text = inp.read_text(encoding='utf-8')
+
+# parse mapping file (original -> mapped)
+map_original_to_mapped = {}
+if mapf.exists():
+    for line in mapf.read_text(encoding='utf-8').splitlines():
+        if '->' in line:
+            a, b = line.split('->', 1)
+            map_original_to_mapped[a.strip()] = b.strip()
+
+# find [VALVES] block
+m = re.search(r"(?mi)^\[VALVES\]\s*(?:;.*\n)?(.*?)(?=^\[|\Z)", text, flags=re.S | re.M)
+if not m:
+    print('No [VALVES] section found')
+    raise SystemExit(1)
+block = m.group(1)
+
+# extract IDs (first non-empty token at start of each non-comment line)
+ids = []
+line_offsets = []
+lines = block.splitlines()
+for i, l in enumerate(lines):
+    if not l.strip() or l.strip().startswith(';'):
+        continue
+    # split by whitespace
+    toks = l.split()
+    if toks:
+        ids.append(toks[0])
+        line_offsets.append((i, l))
+
+# find duplicates
+from collections import defaultdict
+count = defaultdict(list)
+for idx, token in enumerate(ids):
+    count[token].append(idx)
+
+dups = {k: v for k, v in count.items() if len(v) > 1}
+
+print(f'Found {len(ids)} valve IDs; {len(dups)} duplicates')
+for k, v in list(dups.items())[:40]:
+    print(k, 'occurs', len(v), 'times')
+
+# Also find mapped collisions: multiple originals mapped to same mapped token
+# (use a loop name other than `m` so the [VALVES] regex match above is not clobbered)
+mapped_rev = defaultdict(list)
+for orig, mapped in map_original_to_mapped.items():
+    mapped_rev[mapped].append(orig)
+collisions = {mk: origlist for mk, origlist in mapped_rev.items() if len(origlist) > 1}
+print('\nMapped collisions (same mapped token from multiple originals):', len(collisions))
+for mk, ol in list(collisions.items())[:40]:
+    print(mk, ' <- ', ol[:5])
+
+# We'll fix any ID that is purely digits, or any duplicate ID in the valves block.
+fixed_map = {}  # oldToken -> newToken
+used = set(ids)  # existing tokens in valves
+suffix_counter = 1
+
+for token, positions in dups.items():
+    # choose new unique names for subsequent occurrences (leave first occurrence as-is)
+    for pos_index, occ in enumerate(positions):
+        if pos_index == 0:
+            continue
+        base = token
+        # if base is all digits or starts with digit, prefix with VAL_
+        if re.fullmatch(r"\d+", base) or re.match(r"^\d", base):
+            candidate = f'VAL_{base}'
+        else:
+            candidate = f'{base}_dup'
+        # ensure uniqueness
+        while candidate in used:
+            candidate = f'{candidate}_{suffix_counter}'
+            suffix_counter += 1
+        used.add(candidate)
+        fixed_map[token + f'__occ{pos_index}'] = candidate
+
+# The above approach requires us to identify which exact occurrence to replace. We'll instead build a replacement pass that replaces only the Nth occurrence.
+# Build per-token occurrence numbers to replace subsequent ones.
+occ_to_new = {}  # (token, occ_index) -> newname
+for token, positions in dups.items():
+    for pos_index, occ in enumerate(positions):
+        if pos_index == 0:
+            continue
+        if re.fullmatch(r"\d+", token) or re.match(r"^\d", token):
+            candidate = f'VAL_{token}'
+        else:
+            candidate = f'{token}_dup'
+        while candidate in used:
+            candidate = f'{candidate}_{suffix_counter}'
+            suffix_counter += 1
+        used.add(candidate)
+        occ_to_new[(token, pos_index)] = candidate
+
+# Now construct new block replacing the Nth occurrence of duplicated tokens
+new_lines = []
+occ_seen = defaultdict(int)
+for l in lines:
+    if not l.strip() or l.strip().startswith(';'):
+        new_lines.append(l)
+        continue
+    toks = l.split()
+    token = toks[0]
+    occ_seen[token] += 1
+    occ_idx = occ_seen[token] - 1
+    if (token, occ_idx) in occ_to_new:
+        new_token = occ_to_new[(token, occ_idx)]
+        # replace only the first token in the line, preserving leading whitespace
+        m2 = re.match(r"(\s*)" + re.escape(token), l)
+        if m2:
+            leading = m2.group(1)
+            new_line = leading + new_token + l[m2.end():]
+            new_lines.append(new_line)
+            # record mapping for global replacement
+            fixed_map[token + f'__occ{occ_idx}'] = new_token
+    else:
+        new_lines.append(l)
+
+# write new file by replacing block
+new_block = '\n'.join(new_lines) + '\n'
+new_text = text[:m.start(1)] + new_block + text[m.end(1):]
+out.write_text(new_text, encoding='utf-8')
+
+# Create an updated mapping file: show which tokens were changed and why
+with outmap.open('w', encoding='utf-8') as f:
+    f.write('Changes applied to fix duplicate valve IDs:\n')
+    for k, v in occ_to_new.items():
+        token, occ = k
+        f.write(f'{token} occurrence {occ} -> {v}\n')
+    f.write('\nNote: These replacements are only for valve ID occurrences beyond the first.\n')
+
+print('Wrote', out, 'and mapping', outmap)
+print('Replacements:', len(occ_to_new))
+print('If you want different naming (e.g. prefix with V_), rerun with that preference.')
epanet/fix_valve_ids2.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import re
+
+inp = Path(r"d:\TJWaterServer\epanet\szhskeleton-patternfixed-ascii.inp")
+text = inp.read_text(encoding='utf-8')
+lines = text.splitlines()
+
+start = None
+for i, l in enumerate(lines):
+    if l.strip().upper() == '[VALVES]':
+        start = i
+        break
+if start is None:
+    print('No [VALVES] section found')
+    raise SystemExit(1)
+# collect until next section header or EOF
+end = len(lines)
+for j in range(start + 1, len(lines)):
+    if re.match(r"^\s*\[.+\]", lines[j]):
+        end = j
+        break
+block_lines = lines[start + 1:end]
+
+ids = []
+for idx, l in enumerate(block_lines, start=start + 1):
+    if not l.strip() or l.strip().startswith(';'):
+        continue
+    # first token
+    tok = l.split()[0]
+    ids.append((idx, tok, l))
+
+from collections import defaultdict
+count = defaultdict(list)
+for ln, tok, l in ids:
+    count[tok].append(ln)
+
+dups = {k: v for k, v in count.items() if len(v) > 1}
+print('Total valve entries found:', len(ids))
+print('Duplicate token count:', len(dups))
+if dups:
+    print('\nSample duplicates:')
+    for k, v in list(dups.items())[:20]:
+        print(k, 'lines:', v)
+
+# show whether tokens are purely digits
+num_only = [tok for ln, tok, l in ids if re.fullmatch(r'\d+', tok)]
+print('\nNumeric-only valve IDs count:', len(num_only))
+
+# show examples of numeric-only
+if num_only:
+    print('Examples:', num_only[:20])
+
+# write a short report
+rep = inp.with_name(inp.stem + '-valves-report.txt')
+with rep.open('w', encoding='utf-8') as f:
+    f.write(f'Total valve entries: {len(ids)}\n')
+    f.write(f'Duplicate tokens: {len(dups)}\n')
+    for k, v in dups.items():
+        f.write(f'{k}: lines {v}\n')
+    f.write('\nNumeric-only tokens:\n')
+    for tok in sorted(set(num_only)):
+        f.write(tok + '\n')
+
+print('Wrote report to', rep)
Binary file not shown.
main.py (2 changed lines)
@@ -170,7 +170,7 @@ async def startup_db():
     logger.info('**********************************************************')
 
     # open 'szh' by default
-    open_project("fx2026")
+    open_project(project_info.name)
 
 ############################################################
 # auth
@@ -1,6 +1,6 @@
 import os
 from tjnetwork import *
-from api.project import CopyProjectEx
+from api.project_backup import CopyProjectEx
 from run_simulation import run_simulation_ex, from_clock_to_seconds_2
 from math import sqrt, pi
 from epanet.epanet import Output
@@ -1 +1 @@
-name='fx2026'
+name='szh'
run_server.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+import asyncio
+import sys
+import uvicorn
+
+
+if __name__ == "__main__":
+    # On Windows, use the selector event loop policy
+    if sys.platform == "win32":
+        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+    # use uvicorn.run so the workers parameter is supported
+    uvicorn.run(
+        "main:app",
+        host="0.0.0.0",
+        port=8000,
+        workers=2,  # the number of worker processes can be set here
+        loop="asyncio",
+    )
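For comparison, the CLI equivalent of this launcher (note the CLI does not set the Windows event-loop policy itself):

    python -m uvicorn main:app --host 0.0.0.0 --port 8000 --workers 2 --loop asyncio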
@@ -1,9 +1,9 @@
 REM f:
 REM cd "f:\DEV\GitHub\TJWaterServer"
 
-git pull
+REM git pull
 
 REM call startpg.bat
-cd C:\SourceCode\Server
+cd d:\TJWaterServer\
 REM uvicorn main:app --host 0.0.0.0 --port 80 --reload
-uvicorn main:app --host 0.0.0.0 --port 80
+python -m uvicorn main:app --host 0.0.0.0 --port 8000 --reload