重构现代化 FastAPI 后端项目框架
This commit is contained in:
9
scripts/AutoPullGitChanges.bat
Normal file
9
scripts/AutoPullGitChanges.bat
Normal file
@@ -0,0 +1,9 @@
|
||||
@echo off
REM Poll the git remote forever: pull, wait 10 minutes, repeat.

:loop
REM Message: "executing git pull..."
echo 正在执行 git pull...
git pull
REM Message: "waiting 10 minutes..."
echo 等待10分钟...
REM Sleep 600 s without the any-key skip; countdown output discarded.
timeout /t 600 /nobreak >nul

goto loop
|
||||
25
scripts/all_auto_task.py
Normal file
25
scripts/all_auto_task.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import auto_realtime
|
||||
import auto_store_non_realtime_SCADA_data
|
||||
import asyncio
|
||||
import influxdb_api
|
||||
import influxdb_info
|
||||
import project_info
|
||||
|
||||
# 为了让多个任务并发运行,我们可以用 asyncio.to_thread 分别启动它们
|
||||
async def main() -> None:
    """Run the realtime and non-realtime SCADA collection loops concurrently.

    Both loops are blocking functions, so each is pushed onto a worker
    thread via asyncio.to_thread and the two are awaited together.
    """
    realtime = asyncio.to_thread(auto_realtime.realtime_task)
    non_realtime = asyncio.to_thread(
        auto_store_non_realtime_SCADA_data.store_non_realtime_SCADA_data_task
    )
    await asyncio.gather(realtime, non_realtime)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # InfluxDB connection settings (read here; the task modules read them too).
    url = influxdb_info.url
    token = influxdb_info.token
    org_name = influxdb_info.org

    # Load the scada_info configuration from PostgreSQL before the loops
    # start, so both tasks see the realtime / non-realtime point lists.
    influxdb_api.query_pg_scada_info_realtime(project_info.name)
    influxdb_api.query_pg_scada_info_non_realtime(project_info.name)

    # Start both collection loops concurrently via asyncio.
    asyncio.run(main())
|
||||
|
||||
115
scripts/auto_cache.py
Normal file
115
scripts/auto_cache.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import schedule
|
||||
import time
|
||||
import datetime
|
||||
import shutil
|
||||
import redis
|
||||
import urllib.request
|
||||
import influxdb_api
|
||||
import msgpack
|
||||
import datetime
|
||||
|
||||
# 将 Query的信息 序列号到 redis/json, 默认不支持datetime,需要自定义
|
||||
# 自定义序列化函数
|
||||
# 序列化处理器
|
||||
def encode_datetime(obj):
    """msgpack ``default`` hook: encode datetimes as a tagged dict.

    Non-datetime values are returned unchanged so msgpack can raise its
    normal error for genuinely unsupported types.
    """
    if not isinstance(obj, datetime.datetime):
        return obj
    return {
        '__datetime__': True,
        'as_str': obj.strftime("%Y%m%dT%H:%M:%S.%f"),
    }
|
||||
|
||||
# 反序列化处理器
|
||||
def decode_datetime(obj):
    """msgpack ``object_hook``: restore dicts written by encode_datetime.

    Dicts lacking the '__datetime__' marker pass through untouched.
    """
    if '__datetime__' not in obj:
        return obj
    return datetime.datetime.strptime(obj['as_str'], "%Y%m%dT%H:%M:%S.%f")
|
||||
|
||||
##########################
|
||||
# 需要用Python 3.12 来运行才能提高performance
|
||||
##########################
|
||||
|
||||
def queryallrecordsbydate(querydate: str, redis_client: redis.Redis):
    """Cache the full simulation record set for *querydate* in Redis.

    The msgpack blob is written only when the key is absent; an existing
    cache entry is left untouched.
    """
    cache_key = f"queryallrecordsbydate_{querydate}"
    if redis_client.exists(cache_key):
        return
    nodes_links: tuple = influxdb_api.query_all_records_by_date(query_date=querydate)
    redis_client.set(cache_key, msgpack.packb(nodes_links, default=encode_datetime))
|
||||
|
||||
def queryallrecordsbydate_by_url(querydate: str):
    """Warm the server cache by hitting the queryallrecordsbydate endpoint.

    :param querydate: date string, e.g. '2025-02-06'
    :return: the decoded response body on success, None on failure
    """
    print(f'queryallrecordsbydate: {querydate}')

    try:
        # Context manager ensures the HTTP connection is always released
        # (the original leaked the response object).
        with urllib.request.urlopen(
            f"http://127.0.0.1/queryallrecordsbydate/?querydate={querydate}"
        ) as response:
            return response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # Surface the failure reason instead of a bare "Error".
        print(f"Error: {e}")
        return None
|
||||
|
||||
def queryallscadarecordsbydate(querydate: str, redis_client: redis.Redis):
    """Cache the SCADA record set for *querydate* in Redis.

    The msgpack blob is written only when the key is absent; an existing
    cache entry is left untouched.
    """
    cache_key = f"queryallscadarecordsbydate_{querydate}"
    if redis_client.exists(cache_key):
        return
    result_dict = influxdb_api.query_all_SCADA_records_by_date(query_date=querydate)
    redis_client.set(cache_key, msgpack.packb(result_dict, default=encode_datetime))
|
||||
|
||||
def queryallscadarecordsbydate_by_url(querydate: str):
    """Warm the server cache by hitting the queryallscadarecordsbydate endpoint.

    :param querydate: date string, e.g. '2025-02-06'
    :return: the decoded response body on success, None on failure
    """
    print(f'queryallscadarecordsbydate: {querydate}')

    try:
        # Context manager ensures the HTTP connection is always released
        # (the original leaked the response object).
        with urllib.request.urlopen(
            f"http://127.0.0.1/queryallscadarecordsbydate/?querydate={querydate}"
        ) as response:
            return response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # Surface the failure reason instead of a bare "Error".
        print(f"Error: {e}")
        return None
|
||||
|
||||
|
||||
def auto_cache_data():
    """Pre-populate the Redis cache for the previous three days of data.

    Opens a local Redis connection, caches both record sets for each day,
    then closes the connection.
    """
    # Redis also serves to limit concurrent access elsewhere in the system.
    redis_client = redis.Redis(host="127.0.0.1", port=6379, db=0)

    today = datetime.date.today()
    for offset in range(1, 4):
        day = (today - datetime.timedelta(days=offset)).strftime('%Y-%m-%d')
        print(day)

        queryallrecordsbydate(day, redis_client)
        queryallscadarecordsbydate(day, redis_client)

    redis_client.close()
|
||||
|
||||
def auto_cache_data_by_url():
    """Warm the server-side cache for the previous three days via HTTP.

    Same coverage as auto_cache_data(), but drives the web endpoints
    instead of writing to Redis directly.
    """
    today = datetime.date.today()
    for offset in range(1, 4):
        day = (today - datetime.timedelta(days=offset)).strftime('%Y-%m-%d')
        print(day)

        queryallrecordsbydate_by_url(day)
        queryallscadarecordsbydate_by_url(day)
|
||||
|
||||
|
||||
if __name__ == "__main__":

    # Warm the cache once immediately at startup...
    auto_cache_data_by_url()

    # ...then re-run automatically every night at 03:00.
    schedule.every().day.at("03:00").do(auto_cache_data_by_url)

    # Poll the scheduler once a second, forever.
    while True:
        schedule.run_pending()
        time.sleep(1)
|
||||
156
scripts/auto_realtime.py
Normal file
156
scripts/auto_realtime.py
Normal file
@@ -0,0 +1,156 @@
|
||||
from logging.handlers import TimedRotatingFileHandler
|
||||
import influxdb_api
|
||||
import os
|
||||
import logging
|
||||
import globals
|
||||
from datetime import datetime, timedelta, timezone
|
||||
import schedule
|
||||
import time
|
||||
import shutil
|
||||
from influxdb_client import InfluxDBClient, BucketsApi, WriteApi, OrganizationsApi, Point, QueryApi
|
||||
import simulation
|
||||
import influxdb_info
|
||||
import project_info
|
||||
|
||||
def setup_logger():
    """Configure and return the root logger for the simulation process.

    Records go to logs/simulation.log, rotated at midnight with a
    seven-day retention. A console handler is prepared but deliberately
    left unattached.
    """
    os.makedirs("logs", exist_ok=True)

    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

    root = logging.getLogger()
    root.setLevel(logging.INFO)  # global level

    # --- 1. daily-rotating file handler ---
    file_handler = TimedRotatingFileHandler(
        filename=os.path.join("logs", "simulation.log"),
        when="midnight",
        interval=1,
        backupCount=7,
        encoding="utf-8",
    )
    file_handler.suffix = "simulation-%Y-%m-%d.log"  # rotated-file name format
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)

    # --- 2. console handler (kept for debugging, intentionally not added) ---
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel(logging.INFO)

    root.addHandler(file_handler)
    # root.addHandler(console_handler)

    return root


logger = setup_logger()
|
||||
|
||||
# 2025/02/01
|
||||
# 2025/02/01
def get_next_time() -> str:
    """Return the current minute boundary as 'YYYY-MM-DDTHH:MM:00+08:00'.

    The local wall-clock time is truncated to the minute (seconds and
    microseconds zeroed) and formatted with a fixed +08:00 (Beijing)
    offset.
    """
    truncated = datetime.now().replace(second=0, microsecond=0)
    return truncated.strftime('%Y-%m-%dT%H:%M:%S+08:00')
|
||||
|
||||
|
||||
# 2025/02/06
|
||||
# 2025/02/06
def store_realtime_SCADA_data_job() -> None:
    """Minutely job: persist the current realtime SCADA snapshot.

    Builds the minute-aligned timestamp and passes it to
    influxdb_api.store_realtime_SCADA_data_to_influxdb, logging success.
    """
    # e.g. '2025-02-01T18:45:00+08:00'
    sample_time: str = get_next_time()
    influxdb_api.store_realtime_SCADA_data_to_influxdb(sample_time)
    logger.info('{} -- Successfully store realtime SCADA data.'.format(
        datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
|
||||
|
||||
|
||||
# 2025/02/06
|
||||
# 2025/02/06
def get_next_15minute_time() -> str:
    """Return the current 15-minute boundary as 'YYYY-MM-DDTHH:MM:00+08:00'.

    NOTE(review): despite the name, the original arithmetic
    ``(minute // 15 + 1) * 15 - 15`` floors the current minute to the most
    recent quarter hour — it never rounds up, so the ``== 60`` rollover
    branch was unreachable and has been removed. Behavior is unchanged;
    confirm whether "next" (round up) was actually intended.
    """
    now = datetime.now()
    quarter_minute = (now.minute // 15) * 15  # 0, 15, 30 or 45
    boundary = now.replace(minute=quarter_minute, second=0, microsecond=0)
    return boundary.strftime('%Y-%m-%dT%H:%M:%S+08:00')
|
||||
|
||||
|
||||
# 2025/02/07
|
||||
# 2025/02/07
def run_simulation_job() -> None:
    """Quarter-hourly job: refresh SCADA-derived globals and run a realtime simulation.

    Although scheduled every minute (see realtime_task), the job gates
    itself on the wall clock and only acts when the minute is a multiple
    of 15; otherwise it logs a skip message.

    :return: None
    """
    current_time = datetime.now()
    if current_time.minute % 15 == 0:
        print(f"{current_time.strftime('%Y-%m-%d %H:%M:%S')} -- Start simulation task.")
        # Before simulating, read scada_info and update the PostgreSQL
        # database per the configured method, caching ids/patterns in
        # module-level globals used by the simulation.
        simulation.query_corresponding_element_id_and_query_id(project_info.name)
        simulation.query_corresponding_pattern_id_and_query_id(project_info.name)
        region_result = simulation.query_non_realtime_region(project_info.name)
        globals.source_outflow_region_id = simulation.get_source_outflow_region_id(project_info.name, region_result)
        globals.realtime_region_pipe_flow_and_demand_id = simulation.query_realtime_region_pipe_flow_and_demand_id(project_info.name, region_result)
        globals.pipe_flow_region_patterns = simulation.query_pipe_flow_region_patterns(project_info.name)
        globals.non_realtime_region_patterns = simulation.query_non_realtime_region_patterns(project_info.name, region_result)
        # NOTE(review): the second tuple element is bound to a local and
        # never used afterwards — verify whether it should be stored in
        # `globals` like its sibling.
        globals.source_outflow_region_patterns, realtime_region_pipe_flow_and_demand_patterns = simulation.get_realtime_region_patterns(project_info.name,
                                                                                                                                       globals.source_outflow_region_id,
                                                                                                                                       globals.realtime_region_pipe_flow_and_demand_id)
        # Quarter-hour boundary used as the pattern-modification start time.
        modify_pattern_start_time: str = get_next_15minute_time()
        # print(modify_pattern_start_time)
        simulation.run_simulation(name=project_info.name, simulation_type="realtime", modify_pattern_start_time=modify_pattern_start_time)

        logger.info('{} -- Successfully run simulation and store realtime simulation result.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    else:
        logger.info(f"{current_time.strftime('%Y-%m-%d %H:%M:%S')} -- Skipping the simulation task.")
|
||||
|
||||
|
||||
# 2025/02/06
|
||||
# 2025/02/06
def realtime_task() -> None:
    """Run the realtime collection loop forever.

    Aligns to the next minute boundary, then schedules both jobs to fire
    at second :00 of every minute and polls the scheduler once a second.

    NOTE(review): run_simulation_job is scheduled every minute, not every
    15 minutes — it self-gates on ``minute % 15`` internally.

    :return: None (never returns normally — infinite loop)
    """
    # Wait so the first scheduled tick lands on a whole minute.
    now = datetime.now()
    wait_seconds = 60 - now.second
    time.sleep(wait_seconds)
    # Fire at second :00 of every minute.
    schedule.every(1).minute.at(":00").do(store_realtime_SCADA_data_job)
    # Also every minute; the job itself skips non-quarter-hour minutes.
    schedule.every(1).minute.at(":00").do(run_simulation_job)
    # Poll pending jobs once a second, forever.
    while True:
        schedule.run_pending()
        time.sleep(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # InfluxDB connection settings.
    url = influxdb_info.url
    token = influxdb_info.token
    org_name = influxdb_info.org

    client = InfluxDBClient(url=url, token=token)
    # Load the realtime scada_info configuration from PostgreSQL before
    # starting; SCADA data is then stored into the SCADA_data bucket.
    influxdb_api.query_pg_scada_info_realtime(project_info.name)
    # Enter the infinite collection loop.
    realtime_task()
|
||||
139
scripts/auto_store_non_realtime_SCADA_data.py
Normal file
139
scripts/auto_store_non_realtime_SCADA_data.py
Normal file
@@ -0,0 +1,139 @@
|
||||
import influxdb_api
|
||||
import globals
|
||||
from datetime import datetime, timedelta, timezone
|
||||
import schedule
|
||||
import os
|
||||
import logging
|
||||
from logging.handlers import TimedRotatingFileHandler
|
||||
import time
|
||||
from influxdb_client import InfluxDBClient, BucketsApi, WriteApi, OrganizationsApi, Point, QueryApi
|
||||
import influxdb_info
|
||||
import project_info
|
||||
|
||||
def setup_logger():
    """Configure and return the root logger for the SCADA-store process.

    Records go to logs/scada.log, rotated at midnight with a seven-day
    retention. A console handler is prepared but deliberately left
    unattached.
    """
    os.makedirs("logs", exist_ok=True)

    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

    root = logging.getLogger()
    root.setLevel(logging.INFO)  # global level

    # --- 1. daily-rotating file handler ---
    file_handler = TimedRotatingFileHandler(
        filename=os.path.join("logs", "scada.log"),
        when="midnight",
        interval=1,
        backupCount=7,
        encoding="utf-8",
    )
    file_handler.suffix = "scada-%Y-%m-%d.log"  # rotated-file name format
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)

    # --- 2. console handler (kept for debugging, intentionally not added) ---
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel(logging.INFO)

    root.addHandler(file_handler)
    # root.addHandler(console_handler)

    return root


logger = setup_logger()
|
||||
|
||||
# 2025/02/01
|
||||
# 2025/02/01
def get_next_time() -> str:
    """Return the current minute boundary as 'YYYY-MM-DDTHH:MM:00+08:00'.

    Local time with seconds/microseconds zeroed, formatted with a fixed
    +08:00 (Beijing) offset.
    """
    minute_start = datetime.now().replace(second=0, microsecond=0)
    return minute_start.strftime('%Y-%m-%dT%H:%M:%S+08:00')
|
||||
|
||||
|
||||
# 2025/02/06
|
||||
# 2025/02/06
def get_next_period_time() -> str:
    """Return the current 6-hour boundary as 'YYYY-MM-DDTHH:00:00+08:00'.

    NOTE(review): despite the name, the original arithmetic
    ``(hour // 6 + 1) * 6 - 6`` floors the current hour to the most recent
    of 00/06/12/18 — it never rounds up, so the ``>= 24`` day-rollover
    branch was unreachable and has been removed. Behavior is unchanged;
    confirm whether "next" (round up) was actually intended.
    """
    now = datetime.now()
    period_hour = (now.hour // 6) * 6  # 0, 6, 12 or 18
    boundary = now.replace(hour=period_hour, minute=0, second=0, microsecond=0)
    return boundary.strftime('%Y-%m-%dT%H:%M:%S+08:00')
|
||||
|
||||
|
||||
# 2025/02/06
|
||||
# 2025/02/06
def store_non_realtime_SCADA_data_job() -> None:
    """Persist non-realtime SCADA data when the wall clock matches the gate.

    NOTE(review): the original docstring said this runs only at 00/06/12/18
    o'clock, but the active guard fires whenever the minute is a multiple
    of 10 (the 6-hourly condition is commented out below) — confirm which
    cadence is intended.

    :return: None
    """
    current_time = datetime.now()
    # Original 6-hourly gate, currently disabled:
    # if current_time.hour % 6 == 0 and current_time.minute == 0:
    if current_time.minute % 10 == 0:
        logger.info(f"{current_time.strftime('%Y-%m-%d %H:%M:%S')} -- Start store non realtime SCADA data task.")
        # Minute-aligned end timestamp for the history fetch,
        # e.g. '2025-02-06T12:00:00+08:00'.
        get_history_data_end_time: str = get_next_time()
        # print(get_next_time)
        influxdb_api.store_non_realtime_SCADA_data_to_influxdb(get_history_data_end_time)
        logger.info('{} -- Successfully store non realtime SCADA data.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    else:
        logger.info(f"{current_time.strftime('%Y-%m-%d %H:%M:%S')} -- Skipping store non realtime SCADA data task.")
|
||||
|
||||
|
||||
# 2025/02/06
|
||||
# 2025/02/06
def store_non_realtime_SCADA_data_task() -> None:
    """Run store_non_realtime_SCADA_data_job once a minute, forever.

    Aligns to the next minute boundary, schedules the job at second :00
    of every minute, then polls the scheduler once a second. The job
    itself decides (by wall-clock check) whether to actually store data.
    Any unexpected error is logged and terminates the loop.

    :return: None (never returns normally unless an exception occurs)
    """
    # Wait so the first scheduled tick lands on a whole minute.
    now = datetime.now()
    wait_seconds = 60 - now.second
    time.sleep(wait_seconds)
    try:
        # Check once a minute; the job gates itself internally.
        schedule.every(1).minute.at(":00").do(store_non_realtime_SCADA_data_job)
        # Poll pending jobs once a second, forever.
        # (A stray no-op `pass` inside the loop was removed.)
        while True:
            schedule.run_pending()
            time.sleep(1)
    except Exception as e:
        logger.error(f"Error occurred in store_non_realtime_SCADA_data_task: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # InfluxDB connection settings.
    url = influxdb_info.url
    token = influxdb_info.token
    org_name = influxdb_info.org

    client = InfluxDBClient(url=url, token=token)
    # Load the non-realtime scada_info configuration from PostgreSQL
    # before starting; SCADA data is then stored into the SCADA_data bucket.
    influxdb_api.query_pg_scada_info_non_realtime(project_info.name)
    # Enter the infinite collection loop.
    store_non_realtime_SCADA_data_task()
|
||||
1
scripts/build_pyd.cmd
Normal file
1
scripts/build_pyd.cmd
Normal file
@@ -0,0 +1 @@
|
||||
REM Compile the project's Python modules to C extensions (see build_pyd.py).
python build_pyd.py build
|
||||
25
scripts/build_pyd.py
Normal file
25
scripts/build_pyd.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from distutils.core import setup
from Cython.Build import cythonize

# Compile the listed modules to C extensions with Cython.
# Invoke as: python build_pyd.py build
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# setuptools; confirm the target interpreter version.
# NOTE(review): both "run_simlation.py" and "run_simulation.py" are listed;
# the first looks like a typo — verify which file actually exists.
setup(ext_modules=cythonize([
    "main.py",
    "auto_realtime.py",
    "auto_store_non_realtime_SCADA_data.py",
    "tjnetwork.py",
    "online_Analysis.py",
    "sensitivity.py",
    "run_simlation.py",
    "run_simulation.py",
    "get_hist_data.py",
    "get_realValue.py",
    "get_data.py",
    "get_current_total_Q.py",
    "get_current_status.py",
    "influxdb_api.py",
    "influxdb_query_SCADA_data.py",
    "sensor_placement.py",
    "simulation.py",
    "time_api.py",
    "api/*.py",
    "epanet/*.py"
]))
|
||||
5
scripts/clean_projects.py
Normal file
5
scripts/clean_projects.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from tjnetwork import *

if __name__ == '__main__':
    # Clean all project data, then delete the 'project' entry itself.
    # NOTE(review): clean_project/delete_project semantics live in
    # tjnetwork — confirm the required ordering there.
    clean_project()
    delete_project('project')
|
||||
22
scripts/copy_project.py
Normal file
22
scripts/copy_project.py
Normal file
@@ -0,0 +1,22 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry: copy a project one or more times.

    Usage: copy_project source [count]
    Without a count, copies `source` to `source_1`; with a count N,
    copies it to `source_1` .. `source_N`.
    """
    argc = len(sys.argv)
    # BUG FIX: the original accepted up to 3 extra args but silently did
    # nothing for argc == 4; only 1 or 2 arguments are meaningful.
    if argc < 2 or argc > 3:
        print("copy_project source [count]")
        return

    source = sys.argv[1]
    if not have_project(source):
        print(f"{source} is not available")
        # BUG FIX: the original fell through and copied a missing project.
        return

    if argc == 2:
        copy_project(source, f"{source}_1")
    else:
        count = int(sys.argv[2])
        for i in range(1, 1 + count):
            copy_project(source, f"{source}_{i}")


if __name__ == '__main__':
    main()
|
||||
13
scripts/create_project.py
Normal file
13
scripts/create_project.py
Normal file
@@ -0,0 +1,13 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry: create a project (schema version '2') from ./inp/<name>.inp."""
    if len(sys.argv) != 2:
        print("create_project which_inp")
        return

    name = sys.argv[1]
    read_inp(name, f'./inp/{name}.inp', '2')


if __name__ == '__main__':
    main()
|
||||
13
scripts/create_project_v3.py
Normal file
13
scripts/create_project_v3.py
Normal file
@@ -0,0 +1,13 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry: create a project (schema version '3') from ./inp/<name>.inp."""
    if len(sys.argv) != 2:
        print("create_project which_inp")
        return

    name = sys.argv[1]
    read_inp(name, f'./inp/{name}.inp', '3')


if __name__ == '__main__':
    main()
|
||||
136
scripts/create_template.py
Normal file
136
scripts/create_template.py
Normal file
@@ -0,0 +1,136 @@
|
||||
import psycopg as pg
|
||||
|
||||
# Schema scripts executed in dependency order when building the 'project'
# template database (see create_template()).
sql_create = [
    "script/sql/create/0.base.sql",
    "script/sql/create/1.title.sql",
    "script/sql/create/2.junctions.sql",
    "script/sql/create/3.reservoirs.sql",
    "script/sql/create/4.tanks.sql",
    "script/sql/create/5.pipes.sql",
    "script/sql/create/6.pumps.sql",
    "script/sql/create/7.valves.sql",
    "script/sql/create/8.tags.sql",
    "script/sql/create/9.demands.sql",
    "script/sql/create/10.status.sql",
    "script/sql/create/11.patterns.sql",
    "script/sql/create/12.curves.sql",
    "script/sql/create/13.controls.sql",
    "script/sql/create/14.rules.sql",
    "script/sql/create/15.energy.sql",
    "script/sql/create/16.emitters.sql",
    "script/sql/create/17.quality.sql",
    "script/sql/create/18.sources.sql",
    "script/sql/create/19.reactions.sql",
    "script/sql/create/20.mixing.sql",
    "script/sql/create/21.times.sql",
    "script/sql/create/22.report.sql",
    "script/sql/create/23.options.sql",
    "script/sql/create/24.coordinates.sql",
    "script/sql/create/25.vertices.sql",
    "script/sql/create/26.labels.sql",
    "script/sql/create/27.backdrop.sql",
    "script/sql/create/28.end.sql",
    "script/sql/create/29.scada_device.sql",
    "script/sql/create/30.scada_device_data.sql",
    "script/sql/create/31.scada_element.sql",
    "script/sql/create/32.region.sql",
    "script/sql/create/33.dma.sql",
    "script/sql/create/34.sa.sql",
    "script/sql/create/35.vd.sql",
    "script/sql/create/36.wda.sql",
    "script/sql/create/37.history_patterns_flows.sql",
    "script/sql/create/38.scada_info.sql",
    "script/sql/create/39.users.sql",
    "script/sql/create/40.scheme_list.sql",
    "script/sql/create/41.pipe_risk_probability.sql",
    "script/sql/create/42.sensor_placement.sql",
    "script/sql/create/43.burst_locate_result.sql",
    "script/sql/create/extension_data.sql",
    "script/sql/create/operation.sql"
]


# The same scripts in reverse dependency order for teardown
# (see delete_template()).
sql_drop = [
    "script/sql/drop/operation.sql",
    "script/sql/drop/extension_data.sql",
    "script/sql/drop/43.burst_locate_result.sql",
    "script/sql/drop/42.sensor_placement.sql",
    "script/sql/drop/41.pipe_risk_probability.sql",
    "script/sql/drop/40.scheme_list.sql",
    "script/sql/drop/39.users.sql",
    "script/sql/drop/38.scada_info.sql",
    "script/sql/drop/37.history_patterns_flows.sql",
    "script/sql/drop/36.wda.sql",
    "script/sql/drop/35.vd.sql",
    "script/sql/drop/34.sa.sql",
    "script/sql/drop/33.dma.sql",
    "script/sql/drop/32.region.sql",
    "script/sql/drop/31.scada_element.sql",
    "script/sql/drop/30.scada_device_data.sql",
    "script/sql/drop/29.scada_device.sql",
    "script/sql/drop/28.end.sql",
    "script/sql/drop/27.backdrop.sql",
    "script/sql/drop/26.labels.sql",
    "script/sql/drop/25.vertices.sql",
    "script/sql/drop/24.coordinates.sql",
    "script/sql/drop/23.options.sql",
    "script/sql/drop/22.report.sql",
    "script/sql/drop/21.times.sql",
    "script/sql/drop/20.mixing.sql",
    "script/sql/drop/19.reactions.sql",
    "script/sql/drop/18.sources.sql",
    "script/sql/drop/17.quality.sql",
    "script/sql/drop/16.emitters.sql",
    "script/sql/drop/15.energy.sql",
    "script/sql/drop/14.rules.sql",
    "script/sql/drop/13.controls.sql",
    "script/sql/drop/12.curves.sql",
    "script/sql/drop/11.patterns.sql",
    "script/sql/drop/10.status.sql",
    "script/sql/drop/9.demands.sql",
    "script/sql/drop/8.tags.sql",
    "script/sql/drop/7.valves.sql",
    "script/sql/drop/6.pumps.sql",
    "script/sql/drop/5.pipes.sql",
    "script/sql/drop/4.tanks.sql",
    "script/sql/drop/3.reservoirs.sql",
    "script/sql/drop/2.junctions.sql",
    "script/sql/drop/1.title.sql",
    "script/sql/drop/0.base.sql"
]
|
||||
|
||||
def create_template():
    """Create the 'project' template database and load its full schema.

    Creates the database (autocommit), enables PostGIS and pgRouting,
    then executes every script in sql_create in order and commits.
    """
    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
        with conn.cursor() as cur:
            cur.execute("create database project")
    with pg.connect(conninfo="dbname=project host=127.0.0.1") as conn:
        with conn.cursor() as cur:
            cur.execute('create extension postgis cascade')
            cur.execute('create extension pgrouting cascade')
            for script in sql_create:
                with open(script, "r", encoding="utf-8") as fh:
                    cur.execute(fh.read())
                print(f'executed {script}')
        conn.commit()
|
||||
|
||||
def have_template():
    """Return True when the 'project' template database already exists."""
    query = "select * from pg_database where datname = 'project'"
    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
        with conn.cursor() as cur:
            cur.execute(query)
            return cur.rowcount > 0
|
||||
|
||||
def delete_template():
    """Tear down the 'project' template database.

    Runs every script in sql_drop (reverse dependency order), commits,
    then drops the database itself.
    """
    with pg.connect(conninfo="dbname=project host=127.0.0.1") as conn:
        with conn.cursor() as cur:
            for script in sql_drop:
                with open(script, "r", encoding="utf-8") as fh:
                    cur.execute(fh.read())
                print(f'executed {script}')
        conn.commit()
    with pg.connect(conninfo="dbname=postgres host=127.0.0.1", autocommit=True) as conn:
        with conn.cursor() as cur:
            cur.execute("drop database project")
|
||||
|
||||
if __name__ == "__main__":
    # Rebuild from scratch: drop any existing template, then recreate it.
    if (have_template()):
        delete_template()
    create_template()
|
||||
12
scripts/delete_project.py
Normal file
12
scripts/delete_project.py
Normal file
@@ -0,0 +1,12 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry: delete the named project. Usage: delete_project name."""
    if len(sys.argv) != 2:
        print("delete_project name")
        return

    delete_project(sys.argv[1])


if __name__ == '__main__':
    main()
|
||||
7
scripts/demo.py
Normal file
7
scripts/demo.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from tjnetwork import *

# Demo: import the 'beibeizone' network from its INP file, then list
# all projects known to tjnetwork.
read_inp("beibeizone","beibeizone.inp")
#open_project('beibeizone')
#generate_service_area("beibeizone",0.00001)

print(list_project())
|
||||
|
||||
21
scripts/dev.py
Normal file
21
scripts/dev.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from tjnetwork import *

# Smoke test: build a 'dev' project from net3.inp (schema '3') and check
# the service-area calculation against known-good membership lists.
p = 'dev'

read_inp(p, f'./inp/net3.inp', '3')
open_project(p)

sass = calculate_service_area(p)
assert len(sass) == 25

assert sass[0]['River'] == ['River', '60', '61', '123', '601']
assert sass[0]['3'] == ['121', '120', '119', '117', '257', '151', '157', '115', '259', '261', '149', '159', '111', '113', '263', '147', '161', '197', '193', '105', '145', '163', '195', '191', '267', '107', '141', '164', '265', '187', '189', '143', '166', '169', '204', '15', '167', '171', '269', '173', '271', '199', '201', '203', '3', '20', '127', '125', '129', '153', '131', '139']
assert sass[0]['1'] == ['185', '184', '205', '273', '1', '40', '179', '177', '183', '181', '35']
assert sass[0]['2'] == ['207', '275', '2', '50', '255', '247', '253', '251', '241', '249', '239', '243', '237', '211', '229', '209', '213', '231', '208', '215', '206', '217', '219', '225']

print(sass[1])
# NOTE(review): the asserts below re-check sass[0] with DIFFERENT expected
# lists for keys '3' and '1' than the block above — both cannot pass.
# Given the print(sass[1]) just before, these were probably meant to check
# sass[1]; confirm and fix the index.
assert sass[0]['River'] == ['River', '60', '61', '123', '601']
assert sass[0]['3'] == ['121', '120', '119', '117', '257', '151', '157', '115', '259', '261', '149', '159', '111', '113', '263', '147', '161', '197', '193', '145', '163', '195', '191', '141', '164', '265', '187', '143', '166', '169', '267', '204', '15', '167', '171', '269', '173', '199', '201', '203', '3', '20', '127', '125', '129', '153', '131', '139']
assert sass[0]['Lake'] == ['105', '107', 'Lake', '10', '101', '103', '109']
assert sass[0]['1'] == ['189', '185', '271', '184', '205', '273', '1', '40', '179', '177', '183', '181', '35']
assert sass[0]['2'] == ['207', '275', '2', '50', '255', '247', '253', '251', '241', '249', '239', '243', '237', '211', '229', '209', '213', '231', '208', '215', '206', '217', '219', '225']
|
||||
23
scripts/drawpipe.py
Normal file
23
scripts/drawpipe.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import json
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
def draw_pipe(json_path: str = r'C:\Users\dingsu\Desktop\links_coordinates.json') -> None:
    """Plot every pipe segment from a JSON file of coordinate pairs.

    :param json_path: path to a JSON list of [[x1, y1], [x2, y2]] segments.
        The default preserves the previously hard-coded location, so
        existing zero-argument calls behave the same.
    """
    # Explicit utf-8 avoids locale-dependent decoding of the JSON file.
    with open(json_path, 'r', encoding='utf-8') as f:
        pipe_data = json.load(f)

    # Each item is one segment: draw a red line between its two endpoints.
    for segment in pipe_data:
        x1, y1 = segment[0][0], segment[0][1]
        x2, y2 = segment[1][0], segment[1][1]
        plt.plot([x1, x2], [y1, y2], 'r-')

    plt.show()


if __name__ == "__main__":
    draw_pipe()
|
||||
12
scripts/dump_inp.py
Normal file
12
scripts/dump_inp.py
Normal file
@@ -0,0 +1,12 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry: export a project back to <name>.inp (schema '2').

    Usage: dump_inp name
    """
    if len(sys.argv) != 2:
        print("dump_inp name")
        return

    name = sys.argv[1]
    dump_inp(name, f'{name}.inp', '2')


if __name__ == '__main__':
    main()
|
||||
66
scripts/get_current_status.py
Normal file
66
scripts/get_current_status.py
Normal file
@@ -0,0 +1,66 @@
|
||||
from tjnetwork import *
|
||||
from get_realValue import *
|
||||
from get_hist_data import *
|
||||
import datetime
|
||||
from api.s36_wda_cal import *
|
||||
|
||||
|
||||
# Measurement-point ids polled for the current network status
# (flows, pressures, tank levels — see the getters below).
ids=['2498','3854','3853','2510','2514','4780','4854']
# Latest snapshot keyed by id; populated by get_current_data().
cur_data=None
|
||||
|
||||
|
||||
def get_latest_cal_time() -> datetime.datetime:
    """Return the current wall-clock time as the 'latest calculation' time."""
    return datetime.datetime.now()
|
||||
|
||||
|
||||
def get_current_data(str_datetime: str = None) -> bool:
    """Refresh the module-level snapshot ``cur_data``.

    :param str_datetime: when None, fetch live values via get_realValue;
        otherwise fetch historical values at that timestamp.
    :return: True when data was obtained, False otherwise.
    """
    global cur_data
    if str_datetime is None:
        cur_data = get_realValue(ids)
    else:
        # BUG FIX: the original assigned to a misspelled local `cur_date`,
        # so historical fetches never updated the global snapshot.
        cur_data = get_hist_data(ids, str_datetime)
    return cur_data is not None
|
||||
|
||||
def get_current_total_Q(str_dt: str = '') -> float:
    """Sum the three inflow meters from the cur_data snapshot.

    NOTE(review): `str_dt` is accepted but unused — kept only for
    interface compatibility.
    """
    q_ids = ['2498', '3854', '3853']
    # Same summation order as before: 3853 + 3854 + 2498.
    return cur_data[q_ids[2]] + cur_data[q_ids[1]] + cur_data[q_ids[0]]
|
||||
|
||||
def get_h_pressure() -> float:
    """Reading of point id 2510 (presumably the high pressure point — verify)."""
    return cur_data['2510']


def get_l_pressure() -> float:
    """Reading of point id 2514 (presumably the low pressure point — verify)."""
    return cur_data['2514']


def get_h_tank_leve() -> float:
    """Level of tank id 4780. Name typo ('leve') kept for existing callers."""
    return cur_data['4780']


def get_l_tank_leve() -> float:
    """Level of tank id 4854. Name typo ('leve') kept for existing callers."""
    return cur_data['4854']
|
||||
|
||||
|
||||
# test interface
if __name__ == '__main__':
    # if get_current_data()==True:
    #     tQ=get_current_total_Q()
    #     print(f"the current tQ is {tQ}\n")
    # data=get_hist_data(ids,conver_beingtime_to_ucttime('2024-04-10 15:05:00'),conver_beingtime_to_ucttime('2024-04-10 15:10:00'))
    # Print the total base demand of every service area in 'beibeizone'.
    open_project("beibeizone")
    regions=get_all_service_area_ids("beibeizone")
    for region in regions:
        # NOTE(review): `api` is not bound by `from api.s36_wda_cal import *`;
        # this qualified reference likely relies on a star-import side
        # effect — verify, or call get_total_base_demand directly.
        t_basedmds=api.s36_wda_cal.get_total_base_demand("beibeizone",region)
        print(f"{region}:{t_basedmds}")
|
||||
6
scripts/get_current_total_Q.py
Normal file
6
scripts/get_current_total_Q.py
Normal file
@@ -0,0 +1,6 @@
|
||||
from tjnetwork import *
|
||||
from get_realValue import *
|
||||
def get_current_total_Q():
    # Fetch the realtime value of the single inflow meter id '3489'.
    # NOTE(review): returns get_realValue(['3489']) unchanged — if
    # get_realValue returns a mapping rather than a number, this is not a
    # scalar total; confirm against the same-named function in
    # get_current_status.py.
    ids=['3489']
    total_q=get_realValue(ids)
    return total_q
|
||||
167
scripts/get_data.py
Normal file
167
scripts/get_data.py
Normal file
@@ -0,0 +1,167 @@
|
||||
import requests
|
||||
from datetime import datetime
|
||||
import pytz
|
||||
from typing import List, Dict, Union, Optional
|
||||
import csv
|
||||
|
||||
# get_data 是用来获取 历史数据,也就是非实时数据的接口
|
||||
# get_realtime 是用来获取 实时数据
|
||||
|
||||
def convert_timestamp_to_beijing_time(timestamp: Union[int, float]) -> datetime:
    """Convert a millisecond epoch timestamp to Beijing (Asia/Shanghai) time.

    :param timestamp: epoch time in milliseconds
    :return: timezone-aware datetime in the Asia/Shanghai zone
    """
    # stdlib replacement for pytz in this function (Python 3.9+).
    from zoneinfo import ZoneInfo

    # fromtimestamp with an explicit tz avoids the deprecated
    # datetime.utcfromtimestamp plus the naive/aware round-trip.
    return datetime.fromtimestamp(timestamp / 1000, tz=ZoneInfo('Asia/Shanghai'))
|
||||
|
||||
|
||||
def beijing_time_to_utc(beijing_time_str: str) -> str:
    """Convert a Beijing-local 'YYYY-mm-dd HH:MM:SS' string to a UTC ISO-8601 string."""
    shanghai = pytz.timezone('Asia/Shanghai')
    # Parse, attach the Beijing zone, then shift to UTC.
    aware = shanghai.localize(datetime.strptime(beijing_time_str, '%Y-%m-%d %H:%M:%S'))
    return aware.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
|
||||
|
||||
|
||||
def get_history_data(ids: str, begin_date: str, end_date: str, downsample: Optional[str]) -> List[Dict[str, Union[str, datetime, int, float]]]:
    """Download historical SCADA readings for the given monitoring points.

    Args:
        ids: Comma-separated point ids, e.g. '9572' or '2498,2500'.
        begin_date: Beijing-local start time, 'YYYY-mm-dd HH:MM:SS'.
        end_date: Beijing-local end time, same format.
        downsample: Server-side downsample interval (e.g. '1m'), or None.

    Returns:
        List of row dicts with keys 'time' (aware Beijing datetime),
        'device_ID', 'description' and 'monitored_value'. Returns an empty
        list on any request/parse failure — errors are printed, not raised
        (best-effort by design).
    """
    # The API expects UTC ISO timestamps.
    begin_date_utc = beijing_time_to_utc(begin_date)
    end_date_utc = beijing_time_to_utc(end_date)

    # History-data endpoint (external network).
    url = 'http://183.64.62.100:9057/loong/api/curves/data'
    # url = 'http://10.101.15.16:9000/loong/api/curves/data'  # internal network

    params = {
        'ids': ids,
        'beginDate': begin_date_utc,
        'endDate': end_date_utc,
        'downsample': downsample
    }

    history_data_list = []

    try:
        # timeout added: a dead endpoint must not hang the caller forever
        response = requests.get(url, params=params, timeout=30)

        if response.status_code == 200:
            data = response.json()
            for item in data['items']:
                mpoint_id = str(item['mpointId'])
                mpoint_name = item['mpointName']

                for item_data in item['data']:
                    # API delivers millisecond epoch timestamps.
                    beijing_time = convert_timestamp_to_beijing_time(item_data['dataDate'])
                    history_data_list.append({
                        'time': beijing_time,
                        'device_ID': mpoint_id,
                        'description': mpoint_name,
                        'monitored_value': item_data['dataValue']  # keep original type
                    })
        else:
            print("请求失败,状态码:", response.status_code)

    except Exception as e:
        # Best-effort: log and fall through with whatever was collected.
        print("发生异常:", e)

    return history_data_list
|
||||
|
||||
|
||||
# 使用示例
|
||||
# data_list = get_history_data(ids='9572',
|
||||
# begin_date='2025-02-08 06:00:00',
|
||||
# end_date='2025-02-08 12:00:00',
|
||||
# downsample='1m')
|
||||
#
|
||||
# # 打印数据列表
|
||||
# for data in data_list:
|
||||
# print(data)
|
||||
|
||||
# # 定义 CSV 文件的路径
|
||||
# csv_file_path = './influxdb_data_4984.csv'
|
||||
# # 将数据写入 CSV 文件
|
||||
# # with open(csv_file_path, mode='w', newline='') as file:
|
||||
# # writer = csv.writer(file)
|
||||
# #
|
||||
# # # 写入表头
|
||||
# # writer.writerow(['measurement', 'mpointId', 'date', 'dataValue', 'datetime'])
|
||||
# #
|
||||
# # # 写入数据
|
||||
# # for data in data_list:
|
||||
# # measurement = data['mpointName']
|
||||
# # mpointId = data['mpointId']
|
||||
# # date = data['datetime'].strftime('%Y-%m-%d')
|
||||
# # dataValue = data['dataValue']
|
||||
# # datetime_str = data['datetime']
|
||||
# #
|
||||
# # # 写入一行
|
||||
# # writer.writerow([measurement, mpointId, date, dataValue, datetime_str])
|
||||
# #
|
||||
# #
|
||||
# # print(f"数据已保存到 {csv_file_path}")
|
||||
#
|
||||
# filtered_csv_file_path = './filtered_influxdb_data_4984.csv'
|
||||
# #
|
||||
# # # # 读取并筛选数据
|
||||
# data_list1 = []
|
||||
#
|
||||
# with open(csv_file_path, mode='r') as file:
|
||||
# csv_reader = csv.DictReader(file)
|
||||
# for row in csv_reader:
|
||||
# # 将 datetime 列解析为 datetime 对象
|
||||
# datetime_value = datetime.strptime(row['datetime'], '%Y-%m-%d %H:%M:%S%z')
|
||||
#
|
||||
# # 只保留时间为 15 分钟倍数的行
|
||||
# if datetime_value.minute % 15 == 0:
|
||||
# data_list1.append(row)
|
||||
#
|
||||
# # 将筛选后的数据写入新的 CSV 文件
|
||||
# with open(filtered_csv_file_path, mode='w', newline='') as file:
|
||||
# writer = csv.writer(file)
|
||||
#
|
||||
# # 写入表头
|
||||
# writer.writerow(['measurement', 'mpointId', 'date', 'dataValue', 'datetime'])
|
||||
#
|
||||
# # 写入筛选后的数据
|
||||
# for data in data_list1:
|
||||
# writer.writerow([data['measurement'], data['mpointId'], data['date'], data['dataValue'], data['datetime']])
|
||||
#
|
||||
# print(f"筛选后的数据已保存到 {filtered_csv_file_path}")
|
||||
80
scripts/get_hist_data.py
Normal file
80
scripts/get_hist_data.py
Normal file
@@ -0,0 +1,80 @@
|
||||
import requests
|
||||
from datetime import datetime
|
||||
import pytz
|
||||
|
||||
|
||||
def convert_timestamp_to_beijing_time(timestamp):
    """Convert a millisecond epoch timestamp to an aware Beijing-time datetime.

    Args:
        timestamp: Epoch time in milliseconds.

    Returns:
        Timezone-aware ``datetime`` in Asia/Shanghai.
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12 and returns
    # a naive value; build an aware UTC datetime directly instead.
    utc_time = datetime.fromtimestamp(timestamp / 1000, tz=pytz.utc)

    # Convert to Beijing time.
    return utc_time.astimezone(pytz.timezone('Asia/Shanghai'))
|
||||
|
||||
def conver_beingtime_to_ucttime(timestr: str):
    """Convert a Beijing-local 'YYYY-mm-dd HH:MM:SS' string to a UTC ISO-8601 string.

    Bug fix: the parsed datetime is naive, and calling ``astimezone`` on a
    naive value interprets it in the *host's* local timezone — only correct
    on machines whose clock is set to Beijing time. Localize to Asia/Shanghai
    explicitly first (mirrors ``beijing_time_to_utc`` in get_data.py).
    """
    naive = datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
    beijing_time = pytz.timezone('Asia/Shanghai').localize(naive)
    str_utc = beijing_time.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
    return str_utc
|
||||
|
||||
def get_hist_data(ids, begin_date, end_date) -> dict[str, dict[datetime, float]]:
    """Download historical readings and group them per monitoring point.

    Args:
        ids: Comma-separated point ids, e.g. '2498,2500'.
        begin_date: UTC ISO start time, e.g. '2024-03-31T16:00:00Z'.
        end_date: UTC ISO end time, same format.

    Returns:
        ``{mpointId: {beijing_datetime: value}}``. Returns an empty dict on
        failure — previously the non-200 and exception paths fell off the
        end of the function and returned ``None``, violating the annotation.
    """
    # History-data endpoint.
    url = 'http://183.64.62.100:9057/loong/api/curves/data'

    params = {
        'ids': ids,
        'beginDate': begin_date,
        'endDate': end_date
    }
    lst_data = {}
    try:
        # timeout added: a dead endpoint must not block the caller indefinitely
        response = requests.get(url, params=params, timeout=30)

        if response.status_code == 200:
            data = response.json()
            for item in data['items']:
                data_seriers = {}
                for item_data in item['data']:
                    # API delivers millisecond epoch timestamps.
                    beijing_time = convert_timestamp_to_beijing_time(item_data['dataDate'])
                    print("dataDate (Beijing Time):", beijing_time.strftime('%Y-%m-%d %H:%M:%S'))
                    print("dataValue:", item_data['dataValue'])
                    print()  # blank line between entries
                    data_seriers[beijing_time] = float(item_data['dataValue'])
                lst_data[item['mpointId']] = data_seriers
        else:
            print("请求失败,状态码:", response.status_code)

    except Exception as e:
        # Best-effort: log and return whatever was collected so far.
        print("发生异常:", e)

    return lst_data
|
||||
|
||||
|
||||
# 使用示例
|
||||
# get_hist_data(ids='2498,2500',
|
||||
# begin_date='2024-03-31T16:00:00Z',
|
||||
# end_date='2024-04-01T16:00:00Z')
|
||||
73
scripts/get_realValue.py
Normal file
73
scripts/get_realValue.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import requests
|
||||
from datetime import datetime
|
||||
import pytz
|
||||
from typing import List, Dict, Union, Tuple
|
||||
|
||||
|
||||
def convert_to_beijing_time(utc_time_str):
    """Parse a UTC 'YYYY-mm-ddTHH:MM:SSZ' string and convert it to Beijing time."""
    parsed = datetime.strptime(utc_time_str, '%Y-%m-%dT%H:%M:%SZ')
    # Attach the UTC zone to the naive value, then shift to Asia/Shanghai.
    aware_utc = parsed.replace(tzinfo=pytz.timezone('UTC'))
    return aware_utc.astimezone(pytz.timezone('Asia/Shanghai'))
|
||||
|
||||
|
||||
def get_realValue(ids) -> List[Dict[str, Union[str, datetime, int, float]]]:
    """Fetch the latest real-time value of each requested monitoring point.

    Args:
        ids: Comma-separated point ids, e.g. '2498,2500'.

    Returns:
        List of dicts with keys 'device_ID', 'description', 'time'
        (Beijing-time string) and 'monitored_value'. Returns an empty list
        on failure — errors are printed, not raised (best-effort by design).
    """
    # Real-value endpoint (external network).
    url = 'http://183.64.62.100:9057/loong/api/mpoints/realValue'
    # url = 'http://10.101.15.16:9000/loong/api/mpoints/realValue'  # internal network

    params = {
        'ids': ids
    }
    data_list = []

    try:
        # timeout added: never hang indefinitely on a dead endpoint
        response = requests.get(url, params=params, timeout=30)

        if response.status_code == 200:
            data = response.json()
            for realValue in data:
                # 'datadt' arrives as a UTC ISO string; convert for display.
                beijing_time = convert_to_beijing_time(realValue['datadt'])
                data_list.append({
                    'device_ID': realValue['id'],
                    'description': realValue['mpointName'],
                    'time': beijing_time.strftime('%Y-%m-%d %H:%M:%S'),
                    'monitored_value': realValue['realValue']
                })
        else:
            print("请求失败,状态码:", response.status_code)

    except Exception as e:
        # Best-effort: log and return whatever was collected so far.
        print("发生异常:", e)

    return data_list
|
||||
|
||||
|
||||
# 使用示例
|
||||
# data_list = get_realValue(ids='2498,2500')
|
||||
# print(data_list)
|
||||
72
scripts/install.py
Normal file
72
scripts/install.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import os
|
||||
import sys
|
||||
|
||||
def install():
    """Install the project's Python dependencies via pip (Tsinghua mirror).

    Supports CPython 3.4–3.12 only. A bundled PyMetis wheel matching the
    minor version is appended for 3.4–3.10 (no wheel is shipped for 3.11/3.12).
    """
    if sys.version_info.major != 3:
        print("Require install Python 3.x !")
        return

    minor = sys.version_info.minor
    if minor < 4 or minor > 12:
        print("Require install Python 3.4 ~ Python 3.12 !")
        return

    mirror = 'https://pypi.tuna.tsinghua.edu.cn/simple'

    # Upgrade pip first. Use the *running* interpreter rather than whatever
    # 'python'/'pip' happen to resolve to on PATH.
    os.system(f'"{sys.executable}" -m pip install --upgrade pip -i {mirror}')

    # Packages to install ('geopandas' was listed twice; deduplicated).
    packages = [
        '"psycopg[binary]"',
        'pytest',
        'influxdb_client',
        'numpy',
        'fastapi',
        "msgpack",
        'schedule',
        'pandas',
        'openpyxl',
        'redis',
        'pydantic',
        'python-dateutil',
        'starlette',
        'requests',
        'uvicorn',
        'chardet',
        'py-linq',
        'python-multipart',
        'Cython',
        'geopandas',
        'sqlalchemy',
        'networkx',
        'wntr',
        'scipy',
        'scikit-learn',
        'scikit-fuzzy',
        'libpysal',
        'spopt',
        'shapely',
        'passlib',
        'jose'  # NOTE(review): the usual PyPI name is 'python-jose' — confirm
    ]

    # Bundled PyMetis wheels per minor version.
    metis_wheels = {
        4: 'PyMetis-2018.1-cp34-cp34m-win_amd64.whl',
        5: 'PyMetis-2019.1.1-cp35-cp35m-win_amd64.whl',
        6: 'PyMetis-2019.1.1-cp36-cp36m-win_amd64.whl',
        7: 'PyMetis-2020.1-cp37-cp37m-win_amd64.whl',
        8: 'PyMetis-2020.1-cp38-cp38-win_amd64.whl',
        9: 'PyMetis-2020.1-cp39-cp39-win_amd64.whl',
        10: 'PyMetis-2020.1-cp310-cp310-win_amd64.whl',
    }
    if minor in metis_wheels:
        packages.append(f'script/package/{metis_wheels[minor]}')

    for package in packages:
        os.system(f'"{sys.executable}" -m pip install {package} -i {mirror}')


if __name__ == '__main__':
    install()
|
||||
8
scripts/open_szh.py
Normal file
8
scripts/open_szh.py
Normal file
@@ -0,0 +1,8 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry point: open the 'szh' project."""
    open_project('szh')


if __name__ == '__main__':
    main()
|
||||
5
scripts/redis_clear_all_keys.py
Normal file
5
scripts/redis_clear_all_keys.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import redis

# Delete every key in the local Redis DB 0.
redis_client = redis.Redis(host="127.0.0.1", port=6379, db=0)
# '*' matches everything; the original f"**" was a no-op f-string with an
# equivalent glob pattern.
matched_keys = redis_client.keys("*")
# Guard: redis raises "wrong number of arguments" if delete() gets no keys.
if matched_keys:
    redis_client.delete(*matched_keys)
|
||||
5
scripts/restartpg.bat
Normal file
5
scripts/restartpg.bat
Normal file
@@ -0,0 +1,5 @@
|
||||
rem Restart the local PostgreSQL 14.7 instance, then return to the server source tree.
C:
cd "C:\pg-14.7\bin"
pg_ctl -D ../data -l logfile stop
pg_ctl -D ../data -l logfile start
cd "c:\SourceCode\Server"
||||
15
scripts/restore_project.py
Normal file
15
scripts/restore_project.py
Normal file
@@ -0,0 +1,15 @@
|
||||
import sys
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """CLI entry point: restore a single project named on the command line."""
    if len(sys.argv) != 2:
        # Usage hint when the project name is missing.
        print("restore_project name")
        return

    name = sys.argv[1]
    open_project(name)
    restore(name)
    close_project(name)


if __name__ == '__main__':
    main()
|
||||
11
scripts/restore_projects.py
Normal file
11
scripts/restore_projects.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from tjnetwork import *
|
||||
|
||||
def main():
    """Restore every known project in sequence."""
    for name in list_project():
        print(f'restore {name}...')
        open_project(name)
        restore(name)
        close_project(name)


if __name__ == '__main__':
    main()
|
||||
18
scripts/run_server.py
Normal file
18
scripts/run_server.py
Normal file
@@ -0,0 +1,18 @@
|
||||
import asyncio
|
||||
import sys
|
||||
import uvicorn
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # On Windows, force the selector event loop policy (the default Proactor
    # loop is incompatible with some libraries the app uses).
    if sys.platform == "win32":
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    # uvicorn.run with an import string so the 'workers' option can spawn
    # multiple processes.
    uvicorn.run(
        "app.main:app",
        host="0.0.0.0",
        port=8000,
        workers=2,  # number of worker processes
        loop="asyncio",
    )
|
||||
8
scripts/run_simlation.py
Normal file
8
scripts/run_simlation.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from tjnetwork import *
|
||||
from get_current_status import *
|
||||
|
||||
def run_simulation(cur_datetime: str = None) -> str:
    """Stub: open the skeleton project; the simulation itself is not implemented.

    NOTE(review): the bare ``return`` yields None despite the ``-> str``
    annotation — this is an unfinished stub (see run_simulation.py for the
    full implementation).
    """
    open_project('beibei_skeleton')
    return
||||
856
scripts/run_simulation.py
Normal file
856
scripts/run_simulation.py
Normal file
@@ -0,0 +1,856 @@
|
||||
import numpy as np
|
||||
from tjnetwork import *
|
||||
from api.s36_wda_cal import *
|
||||
# from get_real_status import *
|
||||
from datetime import datetime,timedelta
|
||||
from math import modf
|
||||
import json
|
||||
import pytz
|
||||
import requests
|
||||
import time
|
||||
import project_info
|
||||
|
||||
# SCADA API endpoints.
url_path = 'http://10.101.15.16:9000/loong' # internal network
# url_path = 'http://183.64.62.100:9057/loong' # external network
url_real = url_path + '/api/mpoints/realValue'
url_hist = url_path + '/api/curves/data'

# Demand-pattern time step — presumably minutes (TODO confirm).
PATTERN_TIME_STEP=15.0
# SCADA point ids of the three plant outflow mains.
DN_900_ID='2498'
DN_500_ID='3854'
DN_1000_ID='3853'
# SCADA point ids of the high/low pressure monitors (names keep original typos).
H_RESSURE='2510'
L_PRESURE='2514'
# SCADA point ids of the high/low tanks.
H_TANK='4780'
L_TANK='4854'

# Service-area ids of the high/low pressure regions.
H_REGION_1='SA_ZBBDJSCP000002'
H_REGION_2='' #to do
L_REGION_1='SA_ZBBDTJSC000001'
L_REGION_2='SA_R00003'

# reservoir basic height
RESERVOIR_BASIC_HEIGHT = float(250.35)

# regions: 'hp' = high pressure, 'lp' = low pressure (presumably — confirm)
regions = ['hp', 'lp']
regions_demand_patterns = {'hp': ['DN900', 'DN500'], 'lp': ['DN1000']}  # plant outflow approximates region consumption
# Demand-pattern names belonging to each region.
regions_patterns = {'hp': ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan',
                           '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu',
                           'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang',
                           'YongJinFu', 'BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu',
                           'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng', 'CaoJiaBa',
                           'PuLingChang', 'QiLongXiaoQu', 'TuanXiao',
                           'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi',
                           'DN500', 'DN900'],
                    'lp': ['PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao',
                           'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha',
                           'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan',
                           'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong',
                           'WanKeJinYuHuaFuYangFang', 'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ',
                           'YueLiangTian', 'YueLiangTian200', 'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu',
                           'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)',
                           'ZhangDouHua', 'JinYunXiaoQuDN400',
                           'DN1000']}

# nodes: patterns backed by a single dedicated flow monitor.
monitor_single_patterns = ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan',
                           '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu',
                           'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang',
                           'YongJinFu', 'PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao',
                           'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha',
                           'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan',
                           'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong',
                           'WanKeJinYuHuaFuYangFang']
# Pattern name -> SCADA point id(s); 'ZiYunTai' maps to a comma-joined pair.
monitor_single_patterns_id = {'ChuanYiJiXiao': '7338', 'BeiQuanHuaYuan': '7315', 'ZhuangYuanFuDi': '7316',
                              'JingNingJiaYuan': '7528', '308': '8272', 'JiaYinYuan': '7304',
                              'XinChengGuoJi': '7325', 'YiJingBeiChen': '7328', 'ZhongYangXinDu': '7329',
                              'XinHaiJiaYuan': '9138', 'DongFengJie': '7302', 'DingYaXinYu': '7331',
                              'ZiYunTai': '7420,9059', 'XieMaGuangChang': '7326', 'YongJinFu': '9059',
                              'PanXiMingDu': '7320', 'WanKeJinYuHuaFuGaoCeng': '7419',
                              'KeJiXiao': '7305', 'LuGouQiao': '7306', 'LongJiangHuaYuan': '7318',
                              'LaoQiZhongDui': '9075', 'ShiYanCun': '7309', 'TianQiDaSha': '7323',
                              'TianShengPaiChuSuo': '7335', 'TianShengShangPin': '7324', 'JiaoTang': '7332',
                              'RenMinHuaYuan': '7322', 'TaiJiBinJiangYiQi': '7333', 'TianQiHuaYuan': '8235',
                              'TaiJiBinJiangErQi': '7334', '122Zhong': '7314', 'WanKeJinYuHuaFuYangFang': '7418'}

# Patterns whose flow is derived from shared/aggregate monitors.
monitor_unity_patterns = ['BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu',
                          'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng',
                          'CaoJiaBa', 'PuLingChang', 'QiLongXiaoQu', 'TuanXiao',
                          'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ',
                          'YueLiangTian', 'YueLiangTian200',
                          'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu',
                          'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi',
                          'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)',
                          'ZhangDouHua', 'JinYunXiaoQuDN400',
                          'DN500', 'DN900', 'DN1000']
# NOTE(review): several names above (e.g. 'TuanShanBaoZhongShiHua', 'XieMa',
# 'JiuYuanErTongYiYuan') have no entry in this id map — any id lookup on them
# will raise KeyError. Confirm whether those entries are intentionally absent.
monitor_unity_patterns_id = {'BianDianZhan': '7339', 'BeiNanDaDao': '7319', 'TianShengLiJie': '8242',
                             'XueYuanXiaoQu': '7327', 'YunHuaLu': '7312', 'GaoJiaQiao': '7340',
                             'LuZuoFuLuXiaDuan': '7343', 'TianRunCheng': '7310', 'CaoJiaBa': '7300',
                             'PuLingChang': '7307', 'QiLongXiaoQu': '7321', 'TuanXiao': '8963',
                             'ChengBeiCaiShiKou': '7330', 'WenXingShe': '7311',
                             'YueLiangTianBBGJCZ': '7313', 'YueLiangTian': '7313', 'YueLiangTian200': '7313',
                             'ChengTaoChang': '7301', 'HuoCheZhan': '7303',
                             'LiangKu': '7296', 'QunXingLu': '7308',
                             'DN500': '3854', 'DN900': '2498', 'DN1000': '3853'}
monitor_patterns = monitor_single_patterns + monitor_unity_patterns
monitor_patterns_id = {**monitor_single_patterns_id, **monitor_unity_patterns_id}
# pumps
pumps_name = ['1#', '2#', '3#', '4#', '5#', '6#', '7#']
pumps = ['PU00000', 'PU00001', 'PU00002', 'PU00003', 'PU00004', 'PU00005', 'PU00006']
variable_frequency_pumps = ['PU00004', 'PU00005', 'PU00006']
# Pump model id -> SCADA point id (frequency/state reading).
pumps_id = {'PU00000': '2747', 'PU00001': '2776', 'PU00002': '2730', 'PU00003': '2787',
            'PU00004': '2500', 'PU00005': '2502', 'PU00006': '2504'}
# reservoirs
reservoirs = ['ZBBDJSCP000002', 'R00003']
reservoirs_id = {'ZBBDJSCP000002': '2497', 'R00003': '2571'}
# tanks
tanks = ['ZBBDTJSC000002', 'ZBBDTJSC000001']
tanks_id = {'ZBBDTJSC000002': '4780', 'ZBBDTJSC000001': '9774'}
|
||||
|
||||
|
||||
class DataLoader:
|
||||
"""数据加载器"""
|
||||
def __init__(self, project_name, start_time: datetime, end_time: datetime,
|
||||
pumps_control: dict = None, tank_initial_level_control: dict = None,
|
||||
region_demand_control: dict = None, downloading_prohibition: bool = False):
|
||||
self.project_name = project_name # 数据库名
|
||||
self.current_time = self.round_time(datetime.now(pytz.timezone('Asia/Shanghai')), 1) # 圆整至整分钟
|
||||
self.current_round_time = self.round_time(self.current_time, int(PATTERN_TIME_STEP))
|
||||
self.updating_data_flag = True \
|
||||
if self.current_round_time == self.round_time(start_time, int(PATTERN_TIME_STEP)) \
|
||||
else False # 判断是否从当前时刻开始模拟(是否更新最新监测数据)
|
||||
self.downloading_prohibition = downloading_prohibition # 是否禁止下载数据(默认False: 允许下载)
|
||||
self.updating_data_flag = False if self.downloading_prohibition else self.updating_data_flag
|
||||
self.pattern_start_index = get_pattern_index(
|
||||
self.round_time(start_time, int(PATTERN_TIME_STEP)).strftime("%Y-%m-%d %H:%M:%S")) # pattern起始索引
|
||||
self.pattern_end_index = get_pattern_index(
|
||||
self.round_time(end_time, int(PATTERN_TIME_STEP)).strftime("%Y-%m-%d %H:%M:%S")) # pattern结束索引
|
||||
self.pattern_index_list = list(range(self.pattern_start_index, self.pattern_end_index + 1)) # pattern索引列表
|
||||
self.download_id = self.get_download_id() # 数据下载接口id '7338,7315,7316,...'
|
||||
self.current_time_download_data = dict(
|
||||
zip(self.download_id.split(','),
|
||||
[np.nan]*len(list(self.download_id.split(','))))
|
||||
) # {id(str): value(float)}
|
||||
self.current_time_download_data_flag = dict(
|
||||
zip(self.download_id.split(','),
|
||||
[False]*len(list(self.download_id.split(','))))
|
||||
) # 下载数据是否具备实时性, {id(str): flag(bool)}
|
||||
self.old_flow_data = self.init_dict_of_list(dict(
|
||||
zip(monitor_patterns,
|
||||
[[np.nan]] * (len(monitor_patterns)))
|
||||
)) # {pattern_name(str): flow(float)}
|
||||
self.old_pattern_factor = self.init_dict_of_list(dict(
|
||||
zip(monitor_patterns,
|
||||
[[np.nan]] * (len(monitor_patterns)))
|
||||
)) # {pattern_name(str): [pattern_factor(float)]}
|
||||
self.new_flow_data = self.init_dict_of_list(dict(
|
||||
zip(monitor_patterns,
|
||||
[[np.nan]] * (len(monitor_patterns)))
|
||||
)) # {pattern_name(str): flow(float)}
|
||||
self.new_pattern_factor = self.init_dict_of_list(dict(
|
||||
zip(monitor_patterns,
|
||||
[[np.nan]] * (len(monitor_patterns)))
|
||||
)) # {pattern_name(str): [pattern_factor(float)]}
|
||||
self.reservoir_data = dict(zip(reservoirs, [np.nan]*len(reservoirs))) # {reservoir_name(str): level(float)}
|
||||
self.tank_data = dict(zip(tanks, [np.nan] * len(tanks))) # {tank_name(str): level(float)}
|
||||
self.pump_data = self.init_dict_of_list(
|
||||
dict(zip(pumps, [[np.nan]]*len(pumps)))) # {pump_name(str): [frequency(float)]}
|
||||
self.pump_control = pumps_control # {pump_name(str): [frequency(float)]}
|
||||
self.tank_initial_level_control = tank_initial_level_control # {tank_name(str): level(float)}
|
||||
self.region_demand_current = dict(zip(regions, [0]*len(regions))) # {region_name(str): total_demand(float)}
|
||||
self.region_demand_control = region_demand_control # {region_name(str): total_demand(float)}
|
||||
self.region_demand_control_factor = dict(
|
||||
zip(regions, [1]*len(regions))) # 区域流量控制系数(用于调整用水量), {region_name(str): factor(float)}
|
||||
|
||||
def load_data(self):
|
||||
"""生成数据集"""
|
||||
self.download_data() # 下载实时数据
|
||||
self.get_old_pattern_and_flow() # 读取历史记录pattern信息
|
||||
self.cal_demand_convert_factor() # 计算用水量转换系数(设定用水量时)
|
||||
self.set_new_flow() # 设置'更新'流量
|
||||
self.set_new_pattern_factor() # 设置'更新'pattern factors
|
||||
self.set_reservoirs() # 设置清水池
|
||||
self.set_tanks() # 设置调节池
|
||||
self.set_pumps() # 设置水泵
|
||||
return self.pattern_start_index
|
||||
|
||||
def download_data(self):
|
||||
"""下载数据"""
|
||||
if self.updating_data_flag is True:
|
||||
print('{} -- Start downloading data.'.format(
|
||||
datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
|
||||
data_wait_flag = True
|
||||
while data_wait_flag:
|
||||
try:
|
||||
newest_data_time = self.download_real_data(self.download_id) # 获取实时数据
|
||||
except Exception as e:
|
||||
print('{}\nWaiting for real data.'.format(e))
|
||||
time.sleep(1)
|
||||
else:
|
||||
print('{} -- Downloading data ok. Newest timestamp: {}.'.format(
|
||||
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
|
||||
newest_data_time.strftime('%Y-%m-%d %H:%M:%S')))
|
||||
data_wait_flag = False
|
||||
|
||||
def cal_current_region_demand(self):
|
||||
"""计算区域当前用水量"""
|
||||
if self.updating_data_flag is True:
|
||||
for region in self.region_demand_current.keys():
|
||||
total_demand = 0
|
||||
for pipe in regions_demand_patterns[region]:
|
||||
total_demand += self.current_time_download_data[monitor_patterns_id[pipe]] # 出厂流量
|
||||
self.region_demand_current[region] = total_demand
|
||||
|
||||
def cal_history_region_demand(self, pattern_index_list):
|
||||
"""计算区域历史用水量(对应记录的pattern)"""
|
||||
old_demand = {}
|
||||
for region in regions:
|
||||
total_demand = 0
|
||||
for pipe_pattern_name in regions_demand_patterns[region]:
|
||||
old_flows, old_patterns = self.get_history_pattern_info(self.project_name, pipe_pattern_name)
|
||||
for idx in pattern_index_list:
|
||||
total_demand += old_flows[idx] / 4 # 15分钟水量
|
||||
old_demand[region] = total_demand
|
||||
return old_demand
|
||||
|
||||
def cal_demand_convert_factor(self):
|
||||
"""计算用水量转换系数(设定用水量时)"""
|
||||
self.cal_current_region_demand() # 计算区域当前时刻用水量
|
||||
old_demand_moment = self.cal_history_region_demand([self.pattern_start_index]) # 计算区域目标时刻总用水量
|
||||
old_demand_period = self.cal_history_region_demand(self.pattern_index_list) # 计算区域目标时段总用水量
|
||||
for region in regions:
|
||||
self.region_demand_control_factor[region] \
|
||||
= (self.region_demand_current[region] / 4) / old_demand_moment[region] \
|
||||
if self.updating_data_flag is True else 1
|
||||
self.region_demand_control_factor[region] = self.region_demand_control[region] / old_demand_period[region] \
|
||||
if (self.region_demand_control is not None) and (region in self.region_demand_control.keys()) \
|
||||
else self.region_demand_control_factor[region]
|
||||
|
||||
def get_old_pattern_and_flow(self):
|
||||
"""获取所有pattern的选定时段的历史记录的pattern和flow"""
|
||||
for idx in monitor_patterns: # 遍历patterns
|
||||
old_flows, old_patterns = self.get_history_pattern_info(self.project_name, idx)
|
||||
for pattern_idx in self.pattern_index_list:
|
||||
old_flow_data = old_flows[pattern_idx]
|
||||
old_pattern_factor = old_patterns[pattern_idx]
|
||||
if pattern_idx == self.pattern_start_index: # 起始时刻
|
||||
self.old_flow_data[idx][0] = old_flow_data
|
||||
self.old_pattern_factor[idx][0] = old_pattern_factor
|
||||
else:
|
||||
self.old_flow_data[idx].append(old_flow_data)
|
||||
self.old_pattern_factor[idx].append(old_pattern_factor)
|
||||
|
||||
def set_new_flow(self):
|
||||
"""计算模拟时段新流量(相较于历史记录)"""
|
||||
for idx in self.new_flow_data.keys(): # 遍历patterns
|
||||
region_name = None
|
||||
for region in regions_patterns.keys():
|
||||
if idx in regions_patterns[region]:
|
||||
region_name = region # pattern所属分区
|
||||
break
|
||||
# 实时流量
|
||||
if self.updating_data_flag is True:
|
||||
if idx in monitor_unity_patterns[-3:]: # 出水管流量
|
||||
self.new_flow_data[idx][0] = self.current_time_download_data[monitor_patterns_id[idx]]
|
||||
else: # 其余流量
|
||||
self.new_flow_data[idx][0] \
|
||||
= self.region_demand_control_factor[region_name] * self.old_flow_data[idx][0]
|
||||
# if idx == 'ZiYunTai':
|
||||
# idx_a, idx_b = monitor_patterns_id[idx].split(',')
|
||||
# self.new_flow_data[idx][0] \
|
||||
# = self.current_time_download_data[idx_a] - self.current_time_download_data[idx_b]
|
||||
# else:
|
||||
# self.new_flow_data[idx][0] = self.current_time_download_data[monitor_patterns_id[idx]]
|
||||
# for data_id in monitor_patterns_id[idx].split(','):
|
||||
# if (self.current_time_download_data_flag[data_id] is False) \
|
||||
# and (idx not in [pipe for pipe_list in regions_demand_patterns.values()
|
||||
# for pipe in pipe_list]): # 无法获取实时数据
|
||||
# self.new_flow_data[idx][0] \
|
||||
# = self.region_demand_control_factor[region_name] * self.old_flow_data[idx][0]
|
||||
# break
|
||||
# 根据设定用水量修改新流量
|
||||
if (self.region_demand_control is not None) \
|
||||
and (region_name in self.region_demand_control.keys()):
|
||||
for pattern_idx in self.pattern_index_list:
|
||||
if pattern_idx == self.pattern_start_index: # 起始时刻
|
||||
self.new_flow_data[idx][0] \
|
||||
= self.region_demand_control_factor[region_name] * self.old_flow_data[idx][0]
|
||||
else:
|
||||
self.new_flow_data[idx].append(
|
||||
self.region_demand_control_factor[region_name]
|
||||
* self.old_flow_data[idx][self.pattern_index_list.index(pattern_idx)]
|
||||
)
|
||||
|
||||
def set_new_pattern_factor(self):
|
||||
"""更新计算选定时段(设定用水量)/时刻的pattern factor"""
|
||||
pattern_index_list = self.pattern_index_list \
|
||||
if self.region_demand_control is not None \
|
||||
else [self.pattern_start_index]
|
||||
for idx in monitor_patterns: # 遍历patterns
|
||||
for pattern_idx in pattern_index_list: # 遍历需要修改的pattern(index)
|
||||
pattern_idx_cls = pattern_index_list.index(pattern_idx) # 转换index(类表存储结构)
|
||||
old_flow_data = self.old_flow_data[idx][pattern_idx_cls]
|
||||
old_pattern_factor = self.old_pattern_factor[idx][pattern_idx_cls]
|
||||
if pattern_idx_cls == 0: # 起始时刻
|
||||
if idx in monitor_single_patterns:
|
||||
if not np.isnan(self.new_flow_data[idx][0]):
|
||||
self.new_pattern_factor[idx][0] = (self.new_flow_data[idx][0] * 1000 / 3600) # m3/h to L/s
|
||||
if idx in monitor_unity_patterns:
|
||||
if not np.isnan(self.new_flow_data[idx][0]):
|
||||
self.new_pattern_factor[idx][0] \
|
||||
= old_pattern_factor * self.new_flow_data[idx][0] / old_flow_data
|
||||
else:
|
||||
if idx in monitor_single_patterns:
|
||||
if len(self.new_flow_data[idx]) > pattern_idx_cls:
|
||||
self.new_pattern_factor[idx].append(
|
||||
(self.new_flow_data[idx][pattern_idx_cls] * 1000 / 3600)) # m3/h to L/s
|
||||
if idx in monitor_unity_patterns:
|
||||
if len(self.new_flow_data[idx]) > pattern_idx_cls:
|
||||
self.new_pattern_factor[idx].append(
|
||||
old_pattern_factor
|
||||
* self.new_flow_data[idx][pattern_idx_cls]
|
||||
/ old_flow_data)
|
||||
|
||||
def set_reservoirs(self):
|
||||
"""设置清水池"""
|
||||
if self.updating_data_flag is True:
|
||||
for idx in self.reservoir_data.keys():
|
||||
if self.current_time_download_data_flag[reservoirs_id[idx]] is False: # 无法获取实时数据
|
||||
print('There is no current data of reservoir: {}.'.format(idx))
|
||||
else:
|
||||
self.reservoir_data[idx] \
|
||||
= self.current_time_download_data[reservoirs_id[idx]] + RESERVOIR_BASIC_HEIGHT
|
||||
|
||||
def set_tanks(self):
|
||||
"""设置调节池"""
|
||||
for idx in self.tank_data.keys():
|
||||
if self.updating_data_flag is True:
|
||||
if self.current_time_download_data_flag[tanks_id[idx]] is False: # 无法获取实时数据
|
||||
print('There is no current data of tank: {}.'.format(idx))
|
||||
else:
|
||||
self.tank_data[idx] = self.current_time_download_data[tanks_id[idx]]
|
||||
self.tank_data[idx] = self.tank_initial_level_control[idx] \
|
||||
if (self.tank_initial_level_control is not None) and (idx in self.tank_initial_level_control) \
|
||||
else self.tank_data[idx]
|
||||
|
||||
def set_pumps(self):
    """Set pump data from downloaded realtime values and/or explicit pump controls."""
    for idx in self.pump_data.keys():
        if self.updating_data_flag is True:
            if self.current_time_download_data_flag[pumps_id[idx]] is False:  # realtime value unavailable
                print('There is no current data of pump: {}.'.format(idx))
                # fall back to the explicit control values when provided
                if (self.pump_control is not None) and (idx in self.pump_control.keys()):
                    self.pump_data[idx] = self.pump_control[idx]
            else:
                # seed the series with the single downloaded realtime value
                self.pump_data[idx] = [self.current_time_download_data[pumps_id[idx]]]
                if (self.pump_control is not None) and (idx in self.pump_control.keys()):
                    # append controls after the realtime value unless the controls
                    # already cover every pattern step, in which case they replace it
                    self.pump_data[idx] = self.pump_data[idx] + self.pump_control[idx] \
                        if len(self.pump_control[idx]) < len(self.pattern_index_list) \
                        else self.pump_control[idx]  # pump setting
        else:
            # not updating from downloads: apply explicit controls only
            if (self.pump_control is not None) and (idx in self.pump_control.keys()):
                self.pump_data[idx] = self.pump_control[idx]
        # variable-frequency pumps store relative speed, i.e. frequency / 50 Hz
        self.pump_data[idx] \
            = list(np.array(self.pump_data[idx]) / 50) \
            if idx in variable_frequency_pumps else self.pump_data[idx]
|
||||
|
||||
def set_valves(self):
    """Set valves (no-op placeholder; valve overrides appear to be applied in
    run_simulation_ex via valve_control instead -- confirm before extending)."""
    pass
|
||||
|
||||
def download_real_data(self, ids: str):
    """Download realtime values for the comma-separated id list *ids*.

    Fills self.current_time_download_data (id -> float value) and marks ids
    whose record is less than 5 minutes old in
    self.current_time_download_data_flag.

    Returns:
        The newest record timestamp seen, or None if the response had no rows.

    Raises:
        Exception: data is stale but recent enough that a retry may succeed.
        ConnectionError: the HTTP request did not return 200.
    """
    # url_real is the module-level data-service endpoint
    global url_real
    params = {'ids': ids}
    response = requests.get(url_real, params=params)
    if response.status_code == 200:
        newest_data_time = None  # newest timestamp among downloaded records
        data = response.json()
        for realValue in data:  # one record per id
            data_time = convert_utc_to_bj(realValue['datadt'])  # datetime
            self.current_time_download_data[str(realValue['id'])] \
                = float(realValue['realValue'])  # {id(str): value(float)}
            if data_time > self.current_round_time.replace(tzinfo=None) - timedelta(minutes=5):
                # record counts as realtime when it is less than 5 minutes old
                self.current_time_download_data_flag[str(realValue['id'])] = True
            if newest_data_time is None or data_time > newest_data_time:
                newest_data_time = data_time  # track the newest record time
        if newest_data_time is None:
            # BUG FIX: an empty payload used to crash on the None comparison below
            print('There is no data in the downloaded response.')
            self.updating_data_flag = False
        elif newest_data_time <= self.current_round_time.replace(tzinfo=None) - timedelta(minutes=5):
            # newest record is older than the current round time
            warning_text = 'There is no current data with newest timestamp: {}.'.format(
                newest_data_time.strftime('%Y-%m-%d %H:%M:%S'))
            delta_time = self.current_round_time.replace(tzinfo=None) - newest_data_time
            if delta_time < timedelta(minutes=PATTERN_TIME_STEP):  # close enough to retry soon
                raise Exception(warning_text)
            else:
                print(warning_text)
                self.updating_data_flag = False
        else:
            for idx in monitor_unity_patterns[-3:]:  # outlet-pipe flow meters
                if self.current_time_download_data_flag[monitor_patterns_id[idx]] is False:
                    # missing outflow data invalidates the whole update
                    print('There is no current data of outflow: {}.'.format(idx))
                    self.updating_data_flag = False
        if self.updating_data_flag is False:
            print('Abandon updating data with downloaded data.')
        return newest_data_time
    else:
        # request failed; keep the user-facing message unchanged
        print("请求失败,状态码:", response.status_code)
        raise ConnectionError('Cannot download data.')
|
||||
|
||||
@ staticmethod
|
||||
def init_dict_of_list(dict_of_list):
|
||||
"""初始化值为列表的字典(重新生成列表地址, 防止指向同一列表)"""
|
||||
for idx in dict_of_list.keys():
|
||||
dict_of_list[idx] = dict_of_list[idx].copy()
|
||||
return dict_of_list
|
||||
|
||||
@staticmethod
def get_download_id():
    """Build the comma-separated id string for the data-download request.

    Uses the last three unity-pattern meters plus all tank, reservoir and pump
    ids, de-duplicated in first-seen order, with unmapped (None) ids dropped.
    """
    combined = list(monitor_unity_patterns_id.values())[-3:]
    combined += list(tanks_id.values())
    combined += list(reservoirs_id.values())
    combined += list(pumps_id.values())
    # de-duplicate while keeping first-seen order
    unique = list(dict.fromkeys(combined))
    if None in unique:
        unique.remove(None)
    return ','.join(unique)
|
||||
|
||||
@ staticmethod
def get_history_pattern_info(project_name, pattern_name):
    """Read the saved historical pattern info (flow, factor) for the given pattern.

    Returns:
        (flow_list, factors_list): parallel lists of floats ordered by _order.
    """
    # NOTE(review): pattern_name is interpolated straight into SQL -- safe only
    # if it never comes from untrusted input; prefer a parameterized query.
    factors_list = []
    flow_list = []
    patterns_info = read_all(project_name,
                             f"select * from history_patterns_flows where id = '{pattern_name}' order by _order")
    for item in patterns_info:
        flow_list.append(float(item['flow']))
        factors_list.append(float(item['factor']))
    return flow_list, factors_list
|
||||
|
||||
@ staticmethod
|
||||
def judge_time(current_time, time_index_list):
|
||||
"""时间判断"""
|
||||
current_index \
|
||||
= time_index_list.index(current_time) if (current_time in time_index_list) else None
|
||||
return current_index
|
||||
|
||||
@staticmethod
|
||||
def get_time_index_list(start_time: datetime, end_time: datetime, step: int):
|
||||
"""生成时间索引"""
|
||||
time_index_list = [] # 时间索引[str]
|
||||
time_index = start_time
|
||||
while time_index <= end_time:
|
||||
time_index_list.append(time_index)
|
||||
time_index += timedelta(minutes=step)
|
||||
return time_index_list
|
||||
|
||||
@ staticmethod
|
||||
def round_time(time_: datetime, interval=5):
|
||||
"""时间向下取整到整n分钟(北京时间): 四舍六入五留双/向下取整"""
|
||||
# return datetime.fromtimestamp(round(time_.timestamp() / (60 * interval)) * (60 * interval))
|
||||
return datetime.fromtimestamp(int((time_.timestamp()) // (60 * interval)) * (60 * interval))
|
||||
|
||||
|
||||
def convert_utc_to_bj(utc_time_str):
    """Convert a UTC timestamp string ('%Y-%m-%dT%H:%M:%SZ') to naive Beijing time.

    Asia/Shanghai has been fixed at UTC+8 with no DST since 1991, so a plain
    8-hour shift produces results identical to the previous pytz round-trip
    while removing the third-party dependency.
    """
    utc_time = datetime.strptime(utc_time_str, '%Y-%m-%dT%H:%M:%SZ')
    return utc_time + timedelta(hours=8)
|
||||
|
||||
|
||||
def get_datetime(cur_datetime: str):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime."""
    return datetime.strptime(cur_datetime, "%Y-%m-%d %H:%M:%S")
|
||||
|
||||
|
||||
def get_strftime(cur_datetime: datetime):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS'."""
    return cur_datetime.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
|
||||
def step_time(cur_datetime: str, step=5):
    """Advance a 'YYYY-MM-DD HH:MM:SS' timestamp by `step` minutes (default 5)."""
    fmt = "%Y-%m-%d %H:%M:%S"
    advanced = datetime.strptime(cur_datetime, fmt) + timedelta(minutes=step)
    return advanced.strftime(fmt)
|
||||
|
||||
|
||||
def get_pattern_index(cur_datetime: str) -> int:
    """Map a 'YYYY-MM-DD HH:MM:SS' timestamp to its pattern step index within the day."""
    parsed = datetime.strptime(cur_datetime, "%Y-%m-%d %H:%M:%S")
    minutes_of_day = parsed.hour * 60 + parsed.minute
    return int(minutes_of_day / PATTERN_TIME_STEP)
|
||||
|
||||
def get_pattern_index_str(cur_datetime: str) -> str:
    """Return the clock string 'HH:MM:00' of the pattern step containing cur_datetime."""
    step_index = get_pattern_index(cur_datetime)
    # split total hours into whole hours and a fractional part (minutes)
    frac_hours, whole_hours = modf(step_index * PATTERN_TIME_STEP / 60)
    hours_str = str(int(whole_hours)).zfill(2)
    minutes_str = str(int(frac_hours * 60)).zfill(2)
    return '{}:{}:00'.format(hours_str, minutes_str)
|
||||
|
||||
|
||||
def from_seconds_to_clock(secs: int) -> str:
    """Convert a non-negative duration in seconds to an 'HH:MM:SS' clock string."""
    hours, remainder = divmod(int(secs), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
|
||||
|
||||
def from_clock_to_seconds(clock: str) -> int:
    """Return seconds since midnight for the time part of a 'YYYY-MM-DD HH:MM:SS' string."""
    parsed = datetime.strptime(clock, "%Y-%m-%d %H:%M:%S")
    return parsed.hour * 3600 + parsed.minute * 60 + parsed.second
|
||||
|
||||
def from_clock_to_seconds_2(clock: str) -> int:
    """Return seconds since midnight for an 'HH:MM:SS' clock string."""
    parsed = datetime.strptime(clock, "%H:%M:%S")
    return parsed.hour * 3600 + parsed.minute * 60 + parsed.second
|
||||
|
||||
|
||||
def from_clock_to_seconds_3(clock: str) -> int:
    """Return seconds since midnight for an 'HH:MM' clock string.

    Seconds are not part of the format, so the result is always a whole minute.
    (Removed a dead `seconds = dt.second` local that was never used.)
    """
    parsed = datetime.strptime(clock, "%H:%M")
    return parsed.hour * 3600 + parsed.minute * 60
|
||||
|
||||
|
||||
# Convert a datetime string from URL/ISO form to DB form:
# "XXXX-XX-XXTHH:MM:SSZ" -> "XXXX-XX-XX HH:MM:SS"
def trim_time_flag(url_date_time: str) -> str:
    """Strip the ISO-8601 'T' separator and trailing 'Z' from a timestamp string."""
    return url_date_time.replace('T', ' ').replace('Z', '')
|
||||
|
||||
# Single-time-step simulation
def run_simulation(name: str, start_datetime: str, end_datetime: str = None, duration: int = 900) -> str:
    """Run a short hydraulic simulation of project `name` starting at `start_datetime`.

    Args:
        name: project name.
        start_datetime: start time, 'XXXX-XX-XXTHH:MM:SSZ' or 'XXXX-XX-XX HH:MM:SS'.
        end_datetime: optional end time in the same format; defaults to one
            pattern step after the start. (Currently only used to derive the
            default -- the solver duration comes from `duration`.)
        duration: simulation duration in seconds; when None the project's
            hydraulic time step is used instead.

    Returns:
        The JSON string produced by run_project (simulation_result, output, report).
    """
    # Re-open the project so the run starts from a clean state.
    if is_project_open(name):
        close_project(name)
    open_project(name)

    # Normalize the ISO 'T'/'Z' markers so the strings parse with the local format.
    # e.g. pattern step 0 covers 00:00-00:14, step 1 covers 00:15-00:30, ...
    start_datetime = trim_time_flag(start_datetime)
    if end_datetime is not None:
        end_datetime = trim_time_flag(end_datetime)
    if end_datetime is None or start_datetime == end_datetime:
        end_datetime = step_time(start_datetime)  # default: one step later

    # Align PATTERN START with the pattern step containing the start time and
    # set the requested duration.
    str_pattern_start = get_pattern_index_str(start_datetime)
    dic_time = get_time(name)
    dic_time['PATTERN START'] = str_pattern_start

    if duration is not None:
        dic_time['DURATION'] = from_seconds_to_clock(duration)
    else:
        dic_time['DURATION'] = dic_time['HYDRAULIC TIMESTEP']
    cs = ChangeSet()
    cs.operations.append(dic_time)
    set_time(name, cs)

    # Run the simulation; json.loads() acts as a sanity check that the result
    # string is well-formed JSON before we report success.
    result = run_project(name)
    result_data = json.loads(result)  # noqa: F841 -- parse kept as validation
    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S")+'run finished successfully\n')
    return result
|
||||
|
||||
|
||||
# Online simulation
def run_simulation_ex(name: str, simulation_type: str, start_datetime: str,
                      end_datetime: str = None, duration: int = 0,
                      pump_control: dict[str, list] = None, tank_initial_level_control: dict[str, float] = None,
                      region_demand_control: dict[str, float] = None, valve_control: dict[str, dict] = None,
                      downloading_prohibition: bool = False) -> str:
    """Run an online hydraulic simulation with optional control overrides.

    Args:
        name: project name.
        simulation_type: 'realtime' (modifies the original project database) or
            'extended' (works on a '<name>_c' copy).
        start_datetime / end_datetime: 'XXXX-XX-XXTHH:MM:SSZ' or
            'XXXX-XX-XX HH:MM:SS'; end defaults to start + duration.
        duration: simulation duration in seconds; None falls back to the
            project's hydraulic time step.
        pump_control: {pump display name: value or [values]} frequency settings.
        tank_initial_level_control: {tank name: level} initial-level overrides.
        region_demand_control: {region: demand} overrides passed to DataLoader.
        valve_control: {valve name: {'status'/'setting'/'k': value}} overrides.
        downloading_prohibition: skip downloading realtime data when True.

    Returns:
        The JSON string produced by run_project.

    Raises:
        Exception: unknown simulation type, or stale data from DataLoader.
    """
    time_cost_start = time.perf_counter()
    print('{} -- Hydraulic simulation started.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')))

    if is_project_open(name):
        close_project(name)

    if simulation_type.upper() == 'REALTIME':  # realtime: modify the original database
        name_c = name
    elif simulation_type.upper() == 'EXTENDED':  # extended: work on a fresh copy
        name_c = '_'.join([name, 'c'])
        if have_project(name_c):
            if is_project_open(name_c):
                close_project(name_c)
            delete_project(name_c)
        copy_project(name, name_c)  # back up the project
    else:
        raise Exception('Incorrect simulation type, choose in (realtime, extended)')

    open_project(name_c)

    # Time handling: normalize the ISO 'T'/'Z' markers.
    # e.g. pattern step 0 covers 00:00-00:14, step 1 covers 00:15-00:30, ...
    start_datetime = trim_time_flag(start_datetime)
    if end_datetime is not None:
        end_datetime = trim_time_flag(end_datetime)

    # Normalize pump controls into {internal pump name: [values]}.
    # BUG FIX: build a new dict instead of popping/renaming keys inside the
    # caller's dict, which mutated the caller's argument as a side effect.
    if pump_control is not None:
        normalized_pump_control = {}
        for key, value in pump_control.items():
            values = value if isinstance(value, list) else [value]
            normalized_pump_control[pumps[pumps_name.index(key)]] = values
        pump_control = normalized_pump_control

    start_time = get_datetime(start_datetime)  # datetime
    end_time = get_datetime(end_datetime) \
        if end_datetime is not None \
        else get_datetime(start_datetime) + timedelta(seconds=duration)  # datetime

    # Load patterns / levels / pump frequencies (downloads realtime data unless prohibited).
    dataset_loader = DataLoader(project_name=name_c,
                                start_time=start_time, end_time=end_time,
                                pumps_control=pump_control, tank_initial_level_control=tank_initial_level_control,
                                region_demand_control=region_demand_control,
                                downloading_prohibition=downloading_prohibition)
    modify_index = dataset_loader.load_data()  # factor index at which patterns get rewritten
    new_patterns = dataset_loader.new_pattern_factor  # {name: [float]} demand pattern factors
    tank_init_level = dataset_loader.tank_data  # {name: float} tank initial levels
    reservoir_level = dataset_loader.reservoir_data  # {name: float} reservoir levels
    pump_freq = dataset_loader.pump_data  # {name: [float]} pump frequencies/settings
    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Loading data ok.\n")

    pattern_name_list = get_patterns(name_c)  # names are removed as patterns are updated

    # Write the monitored demand patterns.
    for pattern_name in monitor_patterns:
        if not np.isnan(new_patterns[pattern_name][0]):
            pattern = get_pattern(name_c, pattern_name)
            pattern['factors'][modify_index:
                               modify_index + len(new_patterns[pattern_name])] \
                = new_patterns[pattern_name]
            cs = ChangeSet()
            cs.append(pattern)
            set_pattern(name_c, cs)
            if pattern_name in pattern_name_list:
                pattern_name_list.remove(pattern_name)

    # Write reservoir (clear-water basin) level patterns.
    for reservoir_name in reservoirs:
        if (not np.isnan(reservoir_level[reservoir_name])) and (reservoir_level[reservoir_name] != 0):
            reservoir_pattern = get_pattern(name_c, get_reservoir(name_c, reservoir_name)['pattern'])
            reservoir_pattern['factors'][modify_index] = reservoir_level[reservoir_name]
            cs = ChangeSet()
            cs.append(reservoir_pattern)
            set_pattern(name_c, cs)
            if reservoir_pattern['id'] in pattern_name_list:
                pattern_name_list.remove(reservoir_pattern['id'])

    # Write tank initial levels.
    for tank_name in tanks:
        if (not np.isnan(tank_init_level[tank_name])) and (tank_init_level[tank_name] != 0):
            tank = get_tank(name_c, tank_name)
            tank['init_level'] = tank_init_level[tank_name]
            cs = ChangeSet()
            cs.append(tank)
            set_tank(name_c, cs)

    # Write pump frequency patterns.
    for pump_name in pumps:
        if not np.isnan(pump_freq[pump_name][0]):
            pump_pattern = get_pattern(name_c, get_pump(name_c, pump_name)['pattern'])
            pump_pattern['factors'][modify_index
                                    :modify_index + len(pump_freq[pump_name])] \
                = pump_freq[pump_name]
            cs = ChangeSet()
            cs.append(pump_pattern)
            set_pattern(name_c, cs)
            if pump_pattern['id'] in pattern_name_list:
                pattern_name_list.remove(pump_pattern['id'])

    # Apply valve status/setting overrides.
    if valve_control is not None:
        for valve in valve_control.keys():
            status = get_status(name_c, valve)
            if 'status' in valve_control[valve].keys():
                status['status'] = valve_control[valve]['status']
            if 'setting' in valve_control[valve].keys():
                status['setting'] = valve_control[valve]['setting']
            if 'k' in valve_control[valve].keys():
                valve_k = valve_control[valve]['k']
                if valve_k == 0:
                    status['status'] = 'CLOSED'
                else:
                    # empirical fit: valve opening k -> minor-loss setting
                    status['setting'] = 0.1036 * pow(valve_k, -3.105)
            cs = ChangeSet()
            cs.append(status)
            set_status(name_c, cs)

    print('Finish demands amending, unmodified patterns: {}.'.format(pattern_name_list))

    # Align PATTERN START with the step containing start_time and set the duration.
    str_pattern_start = get_pattern_index_str(
        DataLoader.round_time(start_time, int(PATTERN_TIME_STEP)).strftime("%Y-%m-%d %H:%M:%S"))
    dic_time = get_time(name_c)
    dic_time['PATTERN START'] = str_pattern_start

    if duration is not None:
        dic_time['DURATION'] = from_seconds_to_clock(duration)
    else:
        dic_time['DURATION'] = dic_time['HYDRAULIC TIMESTEP']
    cs = ChangeSet()
    cs.operations.append(dic_time)
    set_time(name_c, cs)

    # Run, report timing, close the working project and return the raw result.
    result = run_project(name_c)

    time_cost_end = time.perf_counter()
    print('{} -- Hydraulic simulation finished, cost time: {:.2f} s.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'),
        time_cost_end - time_cost_start))

    close_project(name_c)

    return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual entry point: run an extended-period simulation for the configured
    # project. Earlier one-off experiments are kept below for reference.
    # if get_current_data()==True:
    # tQ=get_current_total_Q()
    # print(f"the current tQ is {tQ}\n")
    # data=get_hist_data(ids,conver_beingtime_to_ucttime('2024-04-10 15:05:00'),conver_beingtime_to_ucttime('2024-04-10 15:10:00'))
    # open_project("beibeizone")
    # read_inp("beibeizone","beibeizone-export_nochinese.inp")
    # run_simulation("beibeizone","2024-04-01T08:00:00Z")
    # read_inp('bb_server', 'model20_en.inp')
    run_simulation_ex(
        name=project_info.name, simulation_type='extended', start_datetime='2024-11-09T02:30:00Z',
        # end_datetime='2024-05-30T16:00:00Z',
        # duration=0,
        # pump_control={'PU00006': [45, 40]}
        # region_demand_control={'hp': 6000, 'lp': 2000}
    )
|
||||
9
scripts/startfastapiserver.bat
Normal file
9
scripts/startfastapiserver.bat
Normal file
@@ -0,0 +1,9 @@
|
||||
REM f:
|
||||
REM cd "f:\DEV\GitHub\TJWaterServer"
|
||||
|
||||
git pull
|
||||
|
||||
REM call startpg.bat
|
||||
cd C:\SourceCode\Server
|
||||
REM uvicorn main:app --host 0.0.0.0 --port 80 --reload
|
||||
uvicorn main:app --host 0.0.0.0 --port 80
|
||||
5
scripts/startpg.bat
Normal file
5
scripts/startpg.bat
Normal file
@@ -0,0 +1,5 @@
|
||||
C:
|
||||
cd "C:\pg-14.7\bin"
|
||||
REM pg_ctl -D ../data -l logfile stop
|
||||
pg_ctl -D ../data -l logfile start
|
||||
cd "c:\SourceCode\Server"
|
||||
6621
scripts/test_tjnetwork.py
Normal file
6621
scripts/test_tjnetwork.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user