重构现代化 FastAPI 后端项目框架
This commit is contained in:
0
app/services/__init__.py
Normal file
0
app/services/__init__.py
Normal file
1
app/services/epanet/__init__.py
Normal file
1
app/services/epanet/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from .epanet import run_project, run_project_return_dict, run_inp, dump_output
|
||||
BIN
app/services/epanet/epanet-output.dll
Normal file
BIN
app/services/epanet/epanet-output.dll
Normal file
Binary file not shown.
451
app/services/epanet/epanet.py
Normal file
451
app/services/epanet/epanet.py
Normal file
@@ -0,0 +1,451 @@
|
||||
import ctypes
|
||||
import platform
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import base64
|
||||
from datetime import datetime
|
||||
import subprocess
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
sys.path.append("..")
|
||||
from api import project
|
||||
from api import inp_out
|
||||
|
||||
|
||||
def _verify_platform():
|
||||
_platform = platform.system()
|
||||
if _platform not in ["Windows", "Linux"]:
|
||||
raise Exception(f"Platform {_platform} unsupported (not yet)")
|
||||
|
||||
|
||||
# Fail fast on unsupported platforms when this module is run as a script.
if __name__ == "__main__":
    _verify_platform()
|
||||
|
||||
|
||||
class Output:
    """ctypes wrapper around the native EPANET output library (epanet-output).

    Opens a binary EPANET output file and exposes typed accessors for the
    network metadata and simulation results stored in it.  Every native call
    goes through :meth:`_check` so that error codes raise with the
    library-provided message.

    NOTE(review): the shared library is resolved relative to the current
    working directory (``epanet/...``), so callers must run from the service
    root — confirm before relocating this module.
    """

    def __init__(self, path: str) -> None:
        """Load the platform's shared library and open the output file at *path*."""
        self._path = path

        if platform.system() == "Windows":
            lib_path = os.path.join(os.getcwd(), "epanet", "epanet-output.dll")
        else:
            lib_path = os.path.join(
                os.getcwd(), "epanet", "linux", "libepanet-output.so"
            )
        self._lib = ctypes.CDLL(lib_path)

        self._handle = ctypes.c_void_p()
        self._check(self._lib.ENR_init(ctypes.byref(self._handle)))
        self._check(
            self._lib.ENR_open(self._handle, ctypes.c_char_p(self._path.encode()))
        )

    def __del__(self):
        # Fixed: destructors must never raise.  A partially constructed
        # instance may lack ``_lib``/``_handle`` (e.g. CDLL load failure),
        # and exceptions escaping __del__ only produce unraisable-error
        # noise at interpreter level — log and move on instead.
        try:
            self._check(self._lib.ENR_close(ctypes.byref(self._handle)))
        except Exception:
            logging.exception("Failed to close EPANET output [%s]", self._path)

    def _check(self, result):
        """Raise when *result* is a native error code.

        Code 0 is success; code 10 is tolerated (warning-level result).
        The error message is fetched from the library, copied into the
        exception text, and the native buffer is freed before raising.
        """
        if result != 0 and result != 10:
            msg = ctypes.c_char_p()
            code = self._lib.ENR_checkError(self._handle, ctypes.byref(msg))
            # Sanity: the library should report the same code we were given.
            assert code == result

            error = f"Failed to read project [{self._path}] output, message [{msg.value.decode()}]"

            self._lib.ENR_free(ctypes.byref(msg))

            raise Exception(error)

    def version(self) -> int:
        """Return the EPANET version number stored in the output file."""
        v = ctypes.c_int()
        self._check(self._lib.ENR_getVersion(self._handle, ctypes.byref(v)))
        return v.value

    def net_size(self) -> dict[str, int]:
        """Return element counts keyed by "node", "tank", "link", "pump", "valve"."""
        element_count = ctypes.POINTER(ctypes.c_int)()
        length = ctypes.c_int()
        self._check(
            self._lib.ENR_getNetSize(
                self._handle, ctypes.byref(element_count), ctypes.byref(length)
            )
        )
        assert length.value == 5
        category = ["node", "tank", "link", "pump", "valve"]
        sizes = {category[i]: element_count[i] for i in range(length.value)}
        self._lib.ENR_free(ctypes.byref(element_count))
        return sizes

    def units(self) -> dict[str, str]:
        """Return the flow / pressure / quality unit names used by this run.

        NOTE(review): the name lists assume the library's enum ordering
        matches these sequences — confirm against the epanet-output headers.
        """
        f_us = ["CFS", "GPM", "MGD", "IMGD", "AFD", "LPS", "LPM", "MLD", "CMH", "CMD"]
        p_us = ["PSI", "MTR", "KPA"]
        q_us = ["NONE", "MGL", "UGL", "HOURS", "PRCNT"]
        f, p, q = ctypes.c_int(1), ctypes.c_int(2), ctypes.c_int(3)
        f_u, p_u, q_u = ctypes.c_int(), ctypes.c_int(), ctypes.c_int()
        self._check(self._lib.ENR_getUnits(self._handle, f, ctypes.byref(f_u)))
        self._check(self._lib.ENR_getUnits(self._handle, p, ctypes.byref(p_u)))
        self._check(self._lib.ENR_getUnits(self._handle, q, ctypes.byref(q_u)))
        return {
            "flow": f_us[f_u.value],
            "pressure": p_us[p_u.value],
            "quality": q_us[q_u.value],
        }

    def times(self) -> dict[str, int]:
        """Return timing metadata: report_start, report_step, sim_duration, num_periods."""
        ts = []
        for i in range(1, 5):
            t = ctypes.c_int(1)
            self._check(
                self._lib.ENR_getTimes(self._handle, ctypes.c_int(i), ctypes.byref(t))
            )
            ts.append(t.value)
        category = ["report_start", "report_step", "sim_duration", "num_periods"]
        return dict(zip(category, ts))

    def _element_names(self, element_type: int, count: int) -> list[str]:
        """Fetch *count* element names of *element_type* (1 = node, 2 = link)."""
        names = []
        for i in range(1, count + 1):
            name = ctypes.c_char_p()
            name_len = ctypes.c_int()
            self._check(
                self._lib.ENR_getElementName(
                    self._handle,
                    ctypes.c_int(element_type),
                    ctypes.c_int(i),
                    ctypes.byref(name),
                    ctypes.byref(name_len),
                )
            )
            names.append(name.value.decode())
            self._lib.ENR_free(ctypes.byref(name))
        return names

    def element_name(self) -> dict[str, list[str]]:
        """Return all node and link IDs, keyed by "nodes" and "links"."""
        sizes = self.net_size()
        return {
            "nodes": self._element_names(1, sizes["node"]),
            "links": self._element_names(2, sizes["link"]),
        }

    def energy_usage(self) -> list[dict[str, Any]]:
        """Return per-pump energy statistics, one dict per pump."""
        size = self.net_size()["pump"]
        usages = []
        category = [
            "utilization",
            "avg.efficiency",
            "avg.kW/flow",
            "avg.kwatts",
            "max.kwatts",
            "cost/day",
        ]
        links = self.element_name()["links"]
        for i in range(1, size + 1):
            index = ctypes.c_int()
            values = ctypes.POINTER(ctypes.c_float)()
            length = ctypes.c_int()
            self._check(
                self._lib.ENR_getEnergyUsage(
                    self._handle,
                    ctypes.c_int(i),
                    ctypes.byref(index),
                    ctypes.byref(values),
                    ctypes.byref(length),
                )
            )
            assert length.value == 6
            # *index* is the 1-based link index of the pump.
            d = {"pump": links[index.value - 1]}
            for j in range(length.value):
                d |= {category[j]: values[j]}
            usages.append(d)
            self._lib.ENR_free(ctypes.byref(values))
        return usages

    def reactions(self) -> dict[str, float]:
        """Return network-wide average reaction rates: bulk, wall, tank, source."""
        values = ctypes.POINTER(ctypes.c_float)()
        length = ctypes.c_int()
        self._check(
            self._lib.ENR_getNetReacts(
                self._handle, ctypes.byref(values), ctypes.byref(length)
            )
        )
        assert length.value == 4
        category = ["bulk", "wall", "tank", "source"]
        d = {category[i]: values[i] for i in range(4)}
        self._lib.ENR_free(ctypes.byref(values))
        return d

    def node_results(self) -> list[dict[str, Any]]:
        """Return per-node time series: demand, head, pressure, quality per period."""
        size = self.net_size()["node"]
        num_periods = self.times()["num_periods"]
        nodes = self.element_name()["nodes"]
        category = ["demand", "head", "pressure", "quality"]
        ds = []
        for i in range(1, size + 1):
            d = {"node": nodes[i - 1], "result": []}
            for j in range(num_periods):
                values = ctypes.POINTER(ctypes.c_float)()
                length = ctypes.c_int()
                self._check(
                    self._lib.ENR_getNodeResult(
                        self._handle, j, i, ctypes.byref(values), ctypes.byref(length)
                    )
                )
                assert length.value == len(category)
                attributes = {}
                for k in range(length.value):
                    attributes[category[k]] = values[k]
                d["result"].append(attributes)
                self._lib.ENR_free(ctypes.byref(values))
            ds.append(d)
        return ds

    def link_results(self) -> list[dict[str, Any]]:
        """Return per-link time series; "status" is decoded to "OPEN"/"CLOSED"."""
        size = self.net_size()["link"]
        num_periods = self.times()["num_periods"]
        links = self.element_name()["links"]
        category = [
            "flow",
            "velocity",
            "headloss",
            "quality",
            "status",
            "setting",
            "reaction",
            "friction",
        ]
        ds = []

        for i in range(1, size + 1):
            d = {"link": links[i - 1], "result": []}
            for j in range(num_periods):
                values = ctypes.POINTER(ctypes.c_float)()
                length = ctypes.c_int()
                self._check(
                    self._lib.ENR_getLinkResult(
                        self._handle, j, i, ctypes.byref(values), ctypes.byref(length)
                    )
                )
                assert length.value == len(category)
                attributes = {}
                for k in range(length.value):
                    if category[k] == "status":
                        # 2.0 encodes a closed link; everything else is open.
                        if values[k] == 2.0:
                            attributes[category[k]] = "CLOSED"
                        else:
                            attributes[category[k]] = "OPEN"
                        continue
                    attributes[category[k]] = values[k]
                d["result"].append(attributes)
                self._lib.ENR_free(ctypes.byref(values))
            ds.append(d)
        return ds

    def dump(self) -> dict[str, Any]:
        """Return everything in the output file as one JSON-serialisable dict."""
        data = {}
        data |= {"version": self.version()}
        data |= {"net_size": self.net_size()}
        data |= {"units": self.units()}
        data |= {"times": self.times()}
        data |= {"element_name": self.element_name()}
        data |= {"energy_usage": self.energy_usage()}
        data |= {"reactions": self.reactions()}
        data |= {"node_results": self.node_results()}
        data |= {"link_results": self.link_results()}
        return data
|
||||
|
||||
|
||||
def _dump_output(path: str) -> dict[str, Any]:
    """Parse the EPANET output file at *path*, persist it as JSON, return it."""
    data = Output(path).dump()
    with open(path + ".json", "w") as f:
        json.dump(data, f)
    return data
|
||||
|
||||
|
||||
def dump_output(path: str) -> str:
    """Return the parsed EPANET output at *path* as a JSON string.

    Side effect: also writes ``<path>.json`` (via ``_dump_output``).
    """
    return json.dumps(_dump_output(path))
|
||||
|
||||
|
||||
def dump_report(path: str) -> str:
    """Return the full text of the EPANET report file at *path*.

    Fixed: the file handle is now closed deterministically via a context
    manager; the original ``open(path).read()`` leaked it until GC.
    """
    with open(path, "r") as f:
        return f.read()
|
||||
|
||||
|
||||
def dump_output_binary(path: str) -> str:
    """Return the raw bytes of the file at *path*, base64-encoded as text."""
    with open(path, "rb") as f:
        raw = f.read()
    return base64.b64encode(raw).decode("utf-8")
|
||||
|
||||
|
||||
# DingZQ, 2025-02-04, 返回dict[str, Any]
|
||||
def run_project_return_dict(name: str, readable_output: bool = False) -> dict[str, Any]:
    """Run an EPANET simulation for project *name* and return the result dict.

    Args:
        name: project identifier; must exist per ``project.have_project``.
        readable_output: when True, "output" holds the parsed (JSON-able)
            results; otherwise it holds the raw output file base64-encoded.

    Returns:
        dict with "simulation_result" ("successful" / "failed"), "report",
        and — on success — "output".

    Raises:
        Exception: if the project does not exist.
    """
    if not project.have_project(name):
        raise Exception(f"Not found project [{name}]")

    base_dir = os.path.abspath(os.getcwd())  # renamed from `dir` (shadowed builtin)

    db_inp = os.path.join(base_dir, "db_inp", name + ".db.inp")
    inp_out.dump_inp(name, db_inp, "2")

    stem = name + ".db"  # renamed from `input` (shadowed builtin)
    if platform.system() == "Windows":
        exe = os.path.join(base_dir, "epanet", "runepanet.exe")
    else:
        exe = os.path.join(base_dir, "epanet", "linux", "runepanet")
    inp = os.path.join(base_dir, "db_inp", stem + ".inp")
    rpt = os.path.join(base_dir, "temp", stem + ".rpt")
    opt = os.path.join(base_dir, "temp", stem + ".opt")

    if platform.system() != "Windows":
        # The bundled binary may lose its executable bit on checkout.
        if not os.access(exe, os.X_OK):
            os.chmod(exe, 0o755)

    data: dict[str, Any] = {}

    # Extend the dynamic-linker search path so the runner finds its .so files.
    env = os.environ.copy()
    if platform.system() == "Linux":
        lib_dir = os.path.dirname(exe)
        env["LD_LIBRARY_PATH"] = f"{lib_dir}:{env.get('LD_LIBRARY_PATH', '')}"

    # Fixed: pass argv as a list without a shell — robust against spaces in
    # paths and immune to shell injection via *name*.
    process = subprocess.run([exe, inp, rpt, opt], env=env)
    result = process.returncode

    if result != 0:
        data["simulation_result"] = "failed"
    else:
        data["simulation_result"] = "successful"
        if readable_output:
            data["output"] = _dump_output(opt)
        else:
            data["output"] = dump_output_binary(opt)

    # NOTE(review): the report is read even when the run failed — presumably
    # the .rpt file still holds the diagnostics; confirm it always exists.
    data["report"] = dump_report(rpt)

    return data
|
||||
|
||||
|
||||
# original code
|
||||
def run_project(name: str, readable_output: bool = False) -> str:
    """Run an EPANET simulation for project *name* and return a JSON string.

    Args:
        name: project identifier; must exist per ``project.have_project``.
        readable_output: when True, the parsed output dict is merged directly
            into the top-level result; otherwise the raw output file goes
            base64-encoded under "output".

    Returns:
        JSON string with "simulation_result", "report" and the output data.

    Raises:
        Exception: if the project does not exist.

    NOTE(review): unlike ``run_project_return_dict``, the readable output is
    merged at the top level (``data |= ...``) rather than nested under
    "output" — callers appear to depend on this shape; kept as-is.
    """
    if not project.have_project(name):
        raise Exception(f"Not found project [{name}]")

    base_dir = os.path.abspath(os.getcwd())  # renamed from `dir` (shadowed builtin)

    db_inp = os.path.join(base_dir, "db_inp", name + ".db.inp")
    inp_out.dump_inp(name, db_inp, "2")

    stem = name + ".db"  # renamed from `input` (shadowed builtin)
    if platform.system() == "Windows":
        exe = os.path.join(base_dir, "epanet", "runepanet.exe")
    else:
        exe = os.path.join(base_dir, "epanet", "linux", "runepanet")
    inp = os.path.join(base_dir, "db_inp", stem + ".inp")
    rpt = os.path.join(base_dir, "temp", stem + ".rpt")
    opt = os.path.join(base_dir, "temp", stem + ".opt")

    argv = [exe, inp, rpt, opt]
    logging.info("Run simulation at %s", datetime.now())
    logging.info("%s", argv)

    if platform.system() != "Windows":
        # The bundled binary may lose its executable bit on checkout.
        if not os.access(exe, os.X_OK):
            os.chmod(exe, 0o755)

    data: dict[str, Any] = {}

    # Extend the dynamic-linker search path so the runner finds its .so files.
    env = os.environ.copy()
    if platform.system() == "Linux":
        lib_dir = os.path.dirname(exe)
        env["LD_LIBRARY_PATH"] = f"{lib_dir}:{env.get('LD_LIBRARY_PATH', '')}"

    # Fixed: argv list without a shell — robust against spaces in paths and
    # immune to shell injection via *name* (was shell=True on a built string).
    process = subprocess.run(argv, env=env)
    result = process.returncode

    if result != 0:
        data["simulation_result"] = "failed"
        logging.error("simulation failed")
    else:
        data["simulation_result"] = "successful"
        logging.info("simulation successful")

        if readable_output:
            data |= _dump_output(opt)
        else:
            data["output"] = dump_output_binary(opt)

    data["report"] = dump_report(rpt)

    return json.dumps(data)
|
||||
|
||||
|
||||
def run_inp(name: str) -> str:
    """Run EPANET directly on ``inp/<name>.inp`` and return a JSON result string.

    Unlike ``run_project``, no project lookup or .inp generation happens:
    the input file must already exist under the ``inp`` directory.

    Returns:
        JSON string with "simulation_result", "report" and, on success,
        the base64-encoded raw output under "output".
    """
    base_dir = os.path.abspath(os.getcwd())  # renamed from `dir` (shadowed builtin)

    if platform.system() == "Windows":
        exe = os.path.join(base_dir, "epanet", "runepanet.exe")
    else:
        exe = os.path.join(base_dir, "epanet", "linux", "runepanet")
    inp = os.path.join(base_dir, "inp", name + ".inp")
    rpt = os.path.join(base_dir, "temp", name + ".rpt")
    opt = os.path.join(base_dir, "temp", name + ".opt")

    if platform.system() != "Windows":
        # The bundled binary may lose its executable bit on checkout.
        if not os.access(exe, os.X_OK):
            os.chmod(exe, 0o755)

    data: dict[str, Any] = {}

    # Extend the dynamic-linker search path so the runner finds its .so files.
    env = os.environ.copy()
    if platform.system() == "Linux":
        lib_dir = os.path.dirname(exe)
        env["LD_LIBRARY_PATH"] = f"{lib_dir}:{env.get('LD_LIBRARY_PATH', '')}"

    # Fixed: argv list without a shell — robust against spaces in paths and
    # immune to shell injection via *name* (was shell=True on a built string).
    process = subprocess.run([exe, inp, rpt, opt], env=env)
    result = process.returncode

    if result != 0:
        data["simulation_result"] = "failed"
    else:
        data["simulation_result"] = "successful"
        data["output"] = dump_output_binary(opt)

    data["report"] = dump_report(rpt)

    return json.dumps(data)
|
||||
BIN
app/services/epanet/epanet2.dll
Normal file
BIN
app/services/epanet/epanet2.dll
Normal file
Binary file not shown.
BIN
app/services/epanet/linux/libepanet-output.so
Normal file
BIN
app/services/epanet/linux/libepanet-output.so
Normal file
Binary file not shown.
BIN
app/services/epanet/linux/libepanet2.so
Normal file
BIN
app/services/epanet/linux/libepanet2.so
Normal file
Binary file not shown.
BIN
app/services/epanet/linux/runepanet
Executable file
BIN
app/services/epanet/linux/runepanet
Executable file
Binary file not shown.
BIN
app/services/epanet/runepanet.exe
Normal file
BIN
app/services/epanet/runepanet.exe
Normal file
Binary file not shown.
55
app/services/globals.py
Normal file
55
app/services/globals.py
Normal file
@@ -0,0 +1,55 @@
|
||||
# Globals shared with simulation.py.

# Base elevation of the reservoir.
RESERVOIR_BASIC_HEIGHT = 250.35
PATTERN_TIME_STEP = None  # float once initialised

# Realtime data: element_id -> api_query_id mappings.
reservoirs_id = {}
tanks_id = {}
fixed_pumps_id = {}
variable_pumps_id = {}
pressure_id = {}
demand_id = {}
quality_id = {}

# Realtime data: pattern_id -> api_query_id mappings.
source_outflow_pattern_id = {}
realtime_pipe_flow_pattern_id = {}
pipe_flow_region_patterns = {}  # partitions non-realtime demand by realtime pipe_flow

# Region queries.
source_outflow_region = {}           # value: the bound pipe segment
source_outflow_region_id = {}        # value: api_query_id
source_outflow_region_patterns = {}  # value: associated_pattern

# Patterns for non-realtime data.
non_realtime_region_patterns = {}  # keyed off source_outflow_region
# Realtime pipe_flow/demand api_query_ids per region; later: region flow minus
# realtime meter flow.
realtime_region_pipe_flow_and_demand_id = {}
# Realtime pipe_flow/demand associated_patterns per region; same subtraction.
realtime_region_pipe_flow_and_demand_patterns = {}

# ---------------------------------------------------------
# Globals shared with influxdb_api.py.

# Realtime api_query_ids grouped by measurement type.
reservoir_liquid_level_realtime_ids = []
tank_liquid_level_realtime_ids = []
fixed_pump_realtime_ids = []
variable_pump_realtime_ids = []
source_outflow_realtime_ids = []
pipe_flow_realtime_ids = []
pressure_realtime_ids = []
demand_realtime_ids = []
quality_realtime_ids = []

# Maximum transmission_frequency observed.
transmission_frequency = None
hydraulic_timestep = None  # time string

# Non-realtime api_query_ids grouped by measurement type.
reservoir_liquid_level_non_realtime_ids = []
tank_liquid_level_non_realtime_ids = []
fixed_pump_non_realtime_ids = []
variable_pump_non_realtime_ids = []
source_outflow_non_realtime_ids = []
pipe_flow_non_realtime_ids = []
pressure_non_realtime_ids = []
demand_non_realtime_ids = []
quality_non_realtime_ids = []

# api_query_id -> associated_element_id (excludes liquid levels and pumps).
scheme_source_outflow_ids = {}
scheme_pipe_flow_ids = {}
scheme_pressure_ids = {}
scheme_demand_ids = {}
scheme_quality_ids = {}
|
||||
152
app/services/mcp/router.py
Normal file
152
app/services/mcp/router.py
Normal file
@@ -0,0 +1,152 @@
|
||||
from fastmcp import FastMCP, Context
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
from ..postgresql.database import get_database_instance
|
||||
from ..postgresql.scada_info import ScadaRepository
|
||||
from ..postgresql.scheme import SchemeRepository
|
||||
|
||||
# MCP server instance that hosts the PostgreSQL tools and resources below.
mcp = FastMCP("TJWater PostgreSQL Service", description="访问水务系统 PostgreSQL 数据库操作")
|
||||
|
||||
|
||||
# 数据库连接辅助函数
|
||||
async def get_database_connection(db_name: Optional[str] = None, ctx: Context = None):
    """Async generator yielding one connection to *db_name* (default DB when None)."""
    if ctx:
        await ctx.info(f"连接到数据库: {db_name or '默认数据库'}")

    database = await get_database_instance(db_name)
    async with database.get_connection() as conn:
        yield conn
|
||||
|
||||
|
||||
@mcp.tool
async def get_scada_info(db_name: Optional[str] = None, ctx: Context = None) -> Dict[str, Any]:
    """
    查询所有 SCADA 信息

    Args:
        db_name: 可选的数据库名称,为空时使用默认数据库
        ctx: MCP 上下文,用于日志记录
    """
    # NOTE: FastMCP uses this docstring as the tool description, so the
    # original (Chinese) text is kept verbatim.
    try:
        if ctx:
            await ctx.info("查询 SCADA 信息...")

        async for conn in get_database_connection(db_name, ctx):
            scada_data = await ScadaRepository.get_scadas(conn)

        if ctx:
            await ctx.info(f"检索到 {len(scada_data)} 条 SCADA 记录")

        return {"success": True, "data": scada_data, "count": len(scada_data)}
    except Exception as e:
        error_msg = f"查询 SCADA 信息时发生错误: {str(e)}"
        if ctx:
            await ctx.error(error_msg)
        return {"success": False, "error": error_msg}
|
||||
|
||||
|
||||
@mcp.tool
async def get_scheme_list(db_name: Optional[str] = None, ctx: Context = None) -> Dict[str, Any]:
    """
    查询所有方案信息

    Args:
        db_name: 可选的数据库名称,为空时使用默认数据库
        ctx: MCP 上下文,用于日志记录
    """
    # NOTE: FastMCP uses this docstring as the tool description, so the
    # original (Chinese) text is kept verbatim.
    try:
        if ctx:
            await ctx.info("查询方案信息...")

        async for conn in get_database_connection(db_name, ctx):
            scheme_data = await SchemeRepository.get_schemes(conn)

        if ctx:
            await ctx.info(f"检索到 {len(scheme_data)} 条方案记录")

        return {"success": True, "data": scheme_data, "count": len(scheme_data)}
    except Exception as e:
        error_msg = f"查询方案信息时发生错误: {str(e)}"
        if ctx:
            await ctx.error(error_msg)
        return {"success": False, "error": error_msg}
|
||||
|
||||
|
||||
@mcp.tool
async def get_burst_locate_results(db_name: Optional[str] = None, ctx: Context = None) -> Dict[str, Any]:
    """
    查询所有爆管定位结果

    Args:
        db_name: 可选的数据库名称,为空时使用默认数据库
        ctx: MCP 上下文,用于日志记录
    """
    # NOTE: FastMCP uses this docstring as the tool description, so the
    # original (Chinese) text is kept verbatim.
    try:
        if ctx:
            await ctx.info("查询爆管定位结果...")

        async for conn in get_database_connection(db_name, ctx):
            burst_data = await SchemeRepository.get_burst_locate_results(conn)

        if ctx:
            await ctx.info(f"检索到 {len(burst_data)} 条爆管记录")

        return {"success": True, "data": burst_data, "count": len(burst_data)}
    except Exception as e:
        error_msg = f"查询爆管定位结果时发生错误: {str(e)}"
        if ctx:
            await ctx.error(error_msg)
        return {"success": False, "error": error_msg}
|
||||
|
||||
|
||||
@mcp.tool
async def get_burst_locate_result_by_incident(
    burst_incident: str,
    db_name: Optional[str] = None,
    ctx: Context = None
) -> Dict[str, Any]:
    """
    根据 burst_incident 查询爆管定位结果

    Args:
        burst_incident: 爆管事件标识符
        db_name: 可选的数据库名称,为空时使用默认数据库
        ctx: MCP 上下文,用于日志记录
    """
    # NOTE: FastMCP uses this docstring as the tool description, so the
    # original (Chinese) text is kept verbatim.
    # NOTE(review): unlike the sibling tools, the repository result is
    # returned as-is (no {"success": ...} wrapper) — presumably the caller
    # expects the raw shape; confirm before unifying.
    try:
        if ctx:
            await ctx.info(f"查询爆管事件 {burst_incident} 的结果...")

        async for conn in get_database_connection(db_name, ctx):
            result = await SchemeRepository.get_burst_locate_result_by_incident(
                conn, burst_incident
            )

        if ctx:
            await ctx.info("检索到爆管事件数据")

        return result
    except Exception as e:
        error_msg = f"根据 burst_incident 查询爆管定位结果时发生错误: {str(e)}"
        if ctx:
            await ctx.error(error_msg)
        return {"success": False, "error": error_msg}
|
||||
|
||||
|
||||
# 添加静态配置资源
|
||||
@mcp.resource("config://database/supported_databases")
def get_supported_databases():
    """列出支持的数据库配置"""
    # Static list; docstring kept verbatim (it is the resource description).
    supported = ["default", "backup", "analytics"]
    return supported
|
||||
|
||||
|
||||
@mcp.resource("config://api/version")
def get_api_version():
    """获取 API 版本"""
    # Static version string; docstring kept verbatim (resource description).
    return "1.0.0"
|
||||
|
||||
|
||||
# Start the MCP server when this module is executed directly.
if __name__ == "__main__":
    mcp.run()
|
||||
1
app/services/project_info.py
Normal file
1
app/services/project_info.py
Normal file
@@ -0,0 +1 @@
|
||||
# Active project identifier.
name = 'szh'
|
||||
1348
app/services/simulation.py
Normal file
1348
app/services/simulation.py
Normal file
File diff suppressed because it is too large
Load Diff
143
app/services/time_api.py
Normal file
143
app/services/time_api.py
Normal file
@@ -0,0 +1,143 @@
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from dateutil import parser, tz
|
||||
|
||||
'''
|
||||
2025-02-09T15:45:00+00:00 采用的是 ISO 8601 国际标准日期时间格式,具体特点如下:
|
||||
|
||||
日期部分:YYYY-MM-DD(年-月-日),例如2025-02-09表示2025年2月9日。
|
||||
时间部分:HH:mm:ss(时:分:秒),例如15:45:00表示下午3点45分0秒。
|
||||
分隔符:日期与时间之间用字母T连接,表明这是一个完整的时间点。
|
||||
时区偏移:末尾的+00:00表示该时间基于协调世界时(UTC),即零时区。若使用Z替代+00:00(如2025-02-09T15:45:00Z),也符合ISO 8601标准,两者等价
|
||||
|
||||
北京时间格式
|
||||
2025-02-09T15:45:00+08:00
|
||||
|
||||
'''
|
||||
# Timezone objects used throughout this module.
BG_TZ = tz.gettz('Asia/Shanghai')  # Beijing time, UTC+8
UTC_TZ = tz.gettz('UTC')
|
||||
|
||||
def parse_utc_time(query_time: str) -> datetime:
    """Parse *query_time* and return an aware datetime in UTC.

    Naive timestamps are assumed to already be UTC and are tagged with the
    UTC tzinfo via ``replace``; aware timestamps are converted with
    ``astimezone``.
    """
    parsed: datetime = parser.parse(query_time)
    if parsed.tzinfo is not None:
        return parsed.astimezone(UTC_TZ)
    return parsed.replace(tzinfo=UTC_TZ)
|
||||
|
||||
def parse_beijing_time(query_time: str) -> datetime:
    """Parse *query_time* and return an aware datetime in Beijing time.

    Naive timestamps are assumed to already be Beijing time (+08:00) and are
    tagged via ``replace``; aware timestamps are converted with
    ``astimezone``.  Any valid time string thus ends up as Beijing time.
    """
    parsed: datetime = parser.parse(query_time)
    if parsed.tzinfo is not None:
        return parsed.astimezone(tz=BG_TZ)
    return parsed.replace(tzinfo=BG_TZ)
|
||||
|
||||
|
||||
def to_utc_time(dt: datetime) -> datetime:
    """Convert an aware datetime (e.g. Beijing time) to UTC."""
    return dt.astimezone(UTC_TZ)
|
||||
|
||||
|
||||
def to_beijing_time(dt: datetime) -> datetime:
    """Convert an aware datetime (e.g. UTC) to Beijing time."""
    return dt.astimezone(tz=BG_TZ)
|
||||
|
||||
|
||||
def to_time_range(dt: datetime, delta: float) -> tuple[datetime, datetime]:
    """Expand the instant *dt* into a (start, end) window of ±*delta* seconds.

    Some backends miss exact point-in-time queries; a small window around
    the instant guarantees a hit.
    """
    window = timedelta(seconds=delta)
    return dt - window, dt + window
|
||||
|
||||
def parse_beijing_date_range(query_date: str) -> tuple[datetime, datetime]:
    """Expand a 'YYYY-MM-DD' date (interpreted as Beijing time) into a
    one-day (start, end) span, where end is start + 24h."""
    day_start = parse_beijing_time(query_date)
    return day_start, day_start + timedelta(days=1)
|
||||
|
||||
|
||||
def get_day_start(dt: datetime) -> datetime:
    """Return midnight (00:00:00.000000) of *dt*'s day, preserving its tzinfo.

    Works for both naive and aware datetimes (Beijing or UTC alike).
    Fixed annotation: the argument is a ``datetime``, not ``datetime.date``
    (the old annotation referenced the unbound ``date`` method).
    """
    return dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
|
||||
|
||||
def get_day_end(dt: datetime) -> datetime:
    """Return 23:59:59.000000 of *dt*'s day, preserving its tzinfo.

    Works for both naive and aware datetimes (Beijing or UTC alike).
    Fixed annotation: the argument is a ``datetime``, not ``datetime.date``.
    NOTE(review): microseconds are zeroed, so the final second's fraction
    (up to .999999) is excluded — confirm downstream range queries expect this.
    """
    return dt.replace(hour=23, minute=59, second=59, microsecond=0)
|
||||
|
||||
def get_date_from_time(time: str) -> str:
    """Return the 'YYYY-MM-DD' date of *time*, interpreted as Beijing time."""
    return str(parse_beijing_time(time).date())
|
||||
|
||||
|
||||
def is_today(query_date: str) -> bool:
    """Return True if *query_date* (interpreted as Beijing time) is today.

    Fixed: "today" is now also taken in the Beijing timezone; the original
    compared against the server's local date (``datetime.now()``), which is
    wrong whenever the host does not run in Asia/Shanghai.
    """
    dt = parse_beijing_time(query_date)
    return dt.date() == datetime.now(tz=BG_TZ).date()
|
||||
|
||||
|
||||
def is_yesterday(query_date: str) -> bool:
    """Return True if *query_date* (interpreted as Beijing time) is yesterday.

    Fixed: the reference date is now taken in the Beijing timezone; the
    original used the server's local date, wrong off Asia/Shanghai hosts.
    """
    dt = parse_beijing_time(query_date)
    return dt.date() == (datetime.now(tz=BG_TZ).date() - timedelta(days=1))
|
||||
|
||||
def is_tomorrow(query_date: str) -> bool:
    """Return True if *query_date* (interpreted as Beijing time) is tomorrow.

    Fixed: the reference date is now taken in the Beijing timezone; the
    original used the server's local date, wrong off Asia/Shanghai hosts.
    """
    dt = parse_beijing_time(query_date)
    return dt.date() == (datetime.now(tz=BG_TZ).date() + timedelta(days=1))
|
||||
|
||||
def is_today_or_future(query_date: str) -> bool:
    """Return True if *query_date* (interpreted as Beijing time) is today or later.

    Fixed: the reference date is now taken in the Beijing timezone; the
    original used the server's local date, wrong off Asia/Shanghai hosts.
    """
    dt = parse_beijing_time(query_date)
    return dt.date() >= datetime.now(tz=BG_TZ).date()
|
||||
|
||||
1348
app/services/tjnetwork.py
Normal file
1348
app/services/tjnetwork.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user