暂存文件的引用修复
This commit is contained in:
856
app/algorithms/api_ex/run_simulation.py
Normal file
856
app/algorithms/api_ex/run_simulation.py
Normal file
@@ -0,0 +1,856 @@
|
||||
import numpy as np
|
||||
from app.services.tjnetwork import *
|
||||
from api.s36_wda_cal import *
|
||||
# from get_real_status import *
|
||||
from datetime import datetime,timedelta
|
||||
from math import modf
|
||||
import json
|
||||
import pytz
|
||||
import requests
|
||||
import time
|
||||
import app.services.project_info as project_info
|
||||
|
||||
# --- Data-service endpoints -------------------------------------------------
url_path = 'http://10.101.15.16:9000/loong'  # intranet address
# url_path = 'http://183.64.62.100:9057/loong'  # internet address
url_real = url_path + '/api/mpoints/realValue'  # real-time point values
url_hist = url_path + '/api/curves/data'        # historical curve data

# Pattern time step in minutes (one demand-pattern slot).
PATTERN_TIME_STEP = 15.0

# Measurement-point ids of the main outlet pipes and sensors.
DN_900_ID = '2498'
DN_500_ID = '3854'
DN_1000_ID = '3853'
# NOTE(review): the two names below look like typos of H_PRESSURE / L_PRESSURE;
# kept unchanged because other modules may import them by these names.
H_RESSURE = '2510'
L_PRESURE = '2514'
H_TANK = '4780'
L_TANK = '4854'

# Service-area ids of the pressure regions.
H_REGION_1 = 'SA_ZBBDJSCP000002'
H_REGION_2 = ''  # to do
L_REGION_1 = 'SA_ZBBDTJSC000001'
L_REGION_2 = 'SA_R00003'

# Reservoir base elevation (m) added to gauge readings.
# (fixed: redundant float() wrapper around a float literal removed)
RESERVOIR_BASIC_HEIGHT = 250.35

# --- Regions ----------------------------------------------------------------
# presumably 'hp' = high-pressure zone, 'lp' = low-pressure zone — TODO confirm
regions = ['hp', 'lp']
# Plant outlet flow approximates regional demand.
regions_demand_patterns = {'hp': ['DN900', 'DN500'], 'lp': ['DN1000']}
# Demand patterns belonging to each region.
regions_patterns = {'hp': ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan',
                           '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu',
                           'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang',
                           'YongJinFu', 'BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu',
                           'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng', 'CaoJiaBa',
                           'PuLingChang', 'QiLongXiaoQu', 'TuanXiao',
                           'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi',
                           'DN500', 'DN900'],
                    'lp': ['PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao',
                           'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha',
                           'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan',
                           'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong',
                           'WanKeJinYuHuaFuYangFang', 'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ',
                           'YueLiangTian', 'YueLiangTian200', 'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu',
                           'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)',
                           'ZhangDouHua', 'JinYunXiaoQuDN400',
                           'DN1000']}

# --- Nodes ------------------------------------------------------------------
# Patterns driven by a single monitored flow meter.
monitor_single_patterns = ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan',
                           '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu',
                           'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang',
                           'YongJinFu', 'PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao',
                           'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha',
                           'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan',
                           'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong',
                           'WanKeJinYuHuaFuYangFang']
# Download-API id per single pattern. 'ZiYunTai' carries a composite id
# ('7420,9059') — presumably the difference of two meters; verify with callers.
monitor_single_patterns_id = {'ChuanYiJiXiao': '7338', 'BeiQuanHuaYuan': '7315', 'ZhuangYuanFuDi': '7316',
                              'JingNingJiaYuan': '7528', '308': '8272', 'JiaYinYuan': '7304',
                              'XinChengGuoJi': '7325', 'YiJingBeiChen': '7328', 'ZhongYangXinDu': '7329',
                              'XinHaiJiaYuan': '9138', 'DongFengJie': '7302', 'DingYaXinYu': '7331',
                              'ZiYunTai': '7420,9059', 'XieMaGuangChang': '7326', 'YongJinFu': '9059',
                              'PanXiMingDu': '7320', 'WanKeJinYuHuaFuGaoCeng': '7419',
                              'KeJiXiao': '7305', 'LuGouQiao': '7306', 'LongJiangHuaYuan': '7318',
                              'LaoQiZhongDui': '9075', 'ShiYanCun': '7309', 'TianQiDaSha': '7323',
                              'TianShengPaiChuSuo': '7335', 'TianShengShangPin': '7324', 'JiaoTang': '7332',
                              'RenMinHuaYuan': '7322', 'TaiJiBinJiangYiQi': '7333', 'TianQiHuaYuan': '8235',
                              'TaiJiBinJiangErQi': '7334', '122Zhong': '7314', 'WanKeJinYuHuaFuYangFang': '7418'}

# Patterns scaled jointly from recorded history. The last three entries
# ('DN500', 'DN900', 'DN1000') are the plant outlet pipes and are relied on
# positionally elsewhere (monitor_unity_patterns[-3:]) — keep them last.
monitor_unity_patterns = ['BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu',
                          'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng',
                          'CaoJiaBa', 'PuLingChang', 'QiLongXiaoQu', 'TuanXiao',
                          'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ',
                          'YueLiangTian', 'YueLiangTian200',
                          'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu',
                          'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi',
                          'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)',
                          'ZhangDouHua', 'JinYunXiaoQuDN400',
                          'DN500', 'DN900', 'DN1000']
# NOTE(review): several names in monitor_unity_patterns have no id here
# (e.g. 'XieMa', 'ZhangDouHua'); lookups for them would raise KeyError —
# confirm whether those patterns ever reach an id lookup.
monitor_unity_patterns_id = {'BianDianZhan': '7339', 'BeiNanDaDao': '7319', 'TianShengLiJie': '8242',
                             'XueYuanXiaoQu': '7327', 'YunHuaLu': '7312', 'GaoJiaQiao': '7340',
                             'LuZuoFuLuXiaDuan': '7343', 'TianRunCheng': '7310', 'CaoJiaBa': '7300',
                             'PuLingChang': '7307', 'QiLongXiaoQu': '7321', 'TuanXiao': '8963',
                             'ChengBeiCaiShiKou': '7330', 'WenXingShe': '7311',
                             'YueLiangTianBBGJCZ': '7313', 'YueLiangTian': '7313', 'YueLiangTian200': '7313',
                             'ChengTaoChang': '7301', 'HuoCheZhan': '7303',
                             'LiangKu': '7296', 'QunXingLu': '7308',
                             'DN500': '3854', 'DN900': '2498', 'DN1000': '3853'}
monitor_patterns = monitor_single_patterns + monitor_unity_patterns
monitor_patterns_id = {**monitor_single_patterns_id, **monitor_unity_patterns_id}

# --- Pumps ------------------------------------------------------------------
pumps_name = ['1#', '2#', '3#', '4#', '5#', '6#', '7#']  # display names, index-aligned with `pumps`
pumps = ['PU00000', 'PU00001', 'PU00002', 'PU00003', 'PU00004', 'PU00005', 'PU00006']
variable_frequency_pumps = ['PU00004', 'PU00005', 'PU00006']  # speed scaled by /50 downstream
pumps_id = {'PU00000': '2747', 'PU00001': '2776', 'PU00002': '2730', 'PU00003': '2787',
            'PU00004': '2500', 'PU00005': '2502', 'PU00006': '2504'}

# --- Reservoirs -------------------------------------------------------------
reservoirs = ['ZBBDJSCP000002', 'R00003']
reservoirs_id = {'ZBBDJSCP000002': '2497', 'R00003': '2571'}

# --- Tanks ------------------------------------------------------------------
tanks = ['ZBBDTJSC000002', 'ZBBDTJSC000001']
tanks_id = {'ZBBDTJSC000002': '4780', 'ZBBDTJSC000001': '9774'}
|
||||
|
||||
|
||||
class DataLoader:
    """Assemble all simulation inputs (patterns, flows, reservoirs, tanks, pumps).

    When the simulation starts at the current rounded time, real-time SCADA
    values are downloaded over HTTP; otherwise (or when downloading is
    prohibited) the loader falls back to recorded history patterns.

    NOTE(review): this block was recovered from a whitespace-mangled diff;
    indentation was reconstructed from the control-flow semantics — verify
    branch nesting (especially in set_pumps) against the original file.
    """

    def __init__(self, project_name, start_time: datetime, end_time: datetime,
                 pumps_control: dict = None, tank_initial_level_control: dict = None,
                 region_demand_control: dict = None, downloading_prohibition: bool = False):
        self.project_name = project_name  # database name
        # wall-clock Beijing time, floored to the whole minute
        self.current_time = self.round_time(datetime.now(pytz.timezone('Asia/Shanghai')), 1)
        self.current_round_time = self.round_time(self.current_time, int(PATTERN_TIME_STEP))
        # True when simulating from "now" (i.e. fresh monitoring data should be downloaded)
        self.updating_data_flag = True \
            if self.current_round_time == self.round_time(start_time, int(PATTERN_TIME_STEP)) \
            else False
        self.downloading_prohibition = downloading_prohibition  # forbid downloads (default False: allowed)
        self.updating_data_flag = False if self.downloading_prohibition else self.updating_data_flag
        self.pattern_start_index = get_pattern_index(
            self.round_time(start_time, int(PATTERN_TIME_STEP)).strftime("%Y-%m-%d %H:%M:%S"))  # first pattern index
        self.pattern_end_index = get_pattern_index(
            self.round_time(end_time, int(PATTERN_TIME_STEP)).strftime("%Y-%m-%d %H:%M:%S"))  # last pattern index
        self.pattern_index_list = list(range(self.pattern_start_index, self.pattern_end_index + 1))  # all pattern indices
        self.download_id = self.get_download_id()  # download-API id string, e.g. '7338,7315,7316,...'
        self.current_time_download_data = dict(
            zip(self.download_id.split(','),
                [np.nan]*len(list(self.download_id.split(','))))
        )  # {id(str): value(float)}
        self.current_time_download_data_flag = dict(
            zip(self.download_id.split(','),
                [False]*len(list(self.download_id.split(','))))
        )  # whether each downloaded value is fresh, {id(str): flag(bool)}
        self.old_flow_data = self.init_dict_of_list(dict(
            zip(monitor_patterns,
                [[np.nan]] * (len(monitor_patterns)))
        ))  # recorded flows, {pattern_name(str): [flow(float)]}
        self.old_pattern_factor = self.init_dict_of_list(dict(
            zip(monitor_patterns,
                [[np.nan]] * (len(monitor_patterns)))
        ))  # recorded factors, {pattern_name(str): [pattern_factor(float)]}
        self.new_flow_data = self.init_dict_of_list(dict(
            zip(monitor_patterns,
                [[np.nan]] * (len(monitor_patterns)))
        ))  # updated flows, {pattern_name(str): [flow(float)]}
        self.new_pattern_factor = self.init_dict_of_list(dict(
            zip(monitor_patterns,
                [[np.nan]] * (len(monitor_patterns)))
        ))  # updated factors, {pattern_name(str): [pattern_factor(float)]}
        self.reservoir_data = dict(zip(reservoirs, [np.nan]*len(reservoirs)))  # {reservoir_name(str): level(float)}
        self.tank_data = dict(zip(tanks, [np.nan] * len(tanks)))  # {tank_name(str): level(float)}
        self.pump_data = self.init_dict_of_list(
            dict(zip(pumps, [[np.nan]]*len(pumps))))  # {pump_name(str): [frequency(float)]}
        self.pump_control = pumps_control  # user pump settings, {pump_name(str): [frequency(float)]}
        self.tank_initial_level_control = tank_initial_level_control  # user tank levels, {tank_name(str): level(float)}
        self.region_demand_current = dict(zip(regions, [0]*len(regions)))  # {region_name(str): total_demand(float)}
        self.region_demand_control = region_demand_control  # user demand targets, {region_name(str): total_demand(float)}
        self.region_demand_control_factor = dict(
            zip(regions, [1]*len(regions)))  # regional demand scaling factor, {region_name(str): factor(float)}

    def load_data(self):
        """Build the full data set and return the starting pattern index."""
        self.download_data()  # download real-time data
        self.get_old_pattern_and_flow()  # read recorded pattern history
        self.cal_demand_convert_factor()  # demand conversion factors (when demand is user-set)
        self.set_new_flow()  # set the updated flows
        self.set_new_pattern_factor()  # set the updated pattern factors
        self.set_reservoirs()  # set clear-water reservoirs
        self.set_tanks()  # set regulating tanks
        self.set_pumps()  # set pumps
        return self.pattern_start_index

    def download_data(self):
        """Download real-time data, retrying every second until a snapshot is obtained."""
        if self.updating_data_flag is True:
            print('{} -- Start downloading data.'.format(
                datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            data_wait_flag = True
            while data_wait_flag:
                try:
                    newest_data_time = self.download_real_data(self.download_id)  # fetch real-time values
                except Exception as e:
                    # download_real_data raises while the newest record is stale
                    # but recent enough to be worth waiting for
                    print('{}\nWaiting for real data.'.format(e))
                    time.sleep(1)
                else:
                    print('{} -- Downloading data ok. Newest timestamp: {}.'.format(
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        newest_data_time.strftime('%Y-%m-%d %H:%M:%S')))
                    data_wait_flag = False

    def cal_current_region_demand(self):
        """Sum each region's current demand from its plant outlet-pipe flows."""
        if self.updating_data_flag is True:
            for region in self.region_demand_current.keys():
                total_demand = 0
                for pipe in regions_demand_patterns[region]:
                    total_demand += self.current_time_download_data[monitor_patterns_id[pipe]]  # plant outlet flow
                self.region_demand_current[region] = total_demand

    def cal_history_region_demand(self, pattern_index_list):
        """Sum each region's historical demand over the given recorded pattern indices."""
        old_demand = {}
        for region in regions:
            total_demand = 0
            for pipe_pattern_name in regions_demand_patterns[region]:
                old_flows, old_patterns = self.get_history_pattern_info(self.project_name, pipe_pattern_name)
                for idx in pattern_index_list:
                    total_demand += old_flows[idx] / 4  # 15-minute volume (hourly flow / 4)
            old_demand[region] = total_demand
        return old_demand

    def cal_demand_convert_factor(self):
        """Compute demand conversion factors (applied when demand is user-set)."""
        self.cal_current_region_demand()  # current-moment regional demand
        old_demand_moment = self.cal_history_region_demand([self.pattern_start_index])  # historical demand at the target moment
        old_demand_period = self.cal_history_region_demand(self.pattern_index_list)  # historical demand over the target period
        for region in regions:
            # real-time factor: current 15-min volume vs. historical volume at the same slot
            self.region_demand_control_factor[region] \
                = (self.region_demand_current[region] / 4) / old_demand_moment[region] \
                if self.updating_data_flag is True else 1
            # a user-set demand target overrides the real-time factor
            self.region_demand_control_factor[region] = self.region_demand_control[region] / old_demand_period[region] \
                if (self.region_demand_control is not None) and (region in self.region_demand_control.keys()) \
                else self.region_demand_control_factor[region]

    def get_old_pattern_and_flow(self):
        """Load the recorded flow and pattern factor for every pattern over the selected period."""
        for idx in monitor_patterns:  # iterate over patterns
            old_flows, old_patterns = self.get_history_pattern_info(self.project_name, idx)
            for pattern_idx in self.pattern_index_list:
                old_flow_data = old_flows[pattern_idx]
                old_pattern_factor = old_patterns[pattern_idx]
                if pattern_idx == self.pattern_start_index:  # starting moment overwrites the seed NaN
                    self.old_flow_data[idx][0] = old_flow_data
                    self.old_pattern_factor[idx][0] = old_pattern_factor
                else:
                    self.old_flow_data[idx].append(old_flow_data)
                    self.old_pattern_factor[idx].append(old_pattern_factor)

    def set_new_flow(self):
        """Compute the new flows for the simulation period (relative to recorded history)."""
        for idx in self.new_flow_data.keys():  # iterate over patterns
            region_name = None
            for region in regions_patterns.keys():
                if idx in regions_patterns[region]:
                    region_name = region  # region this pattern belongs to
                    break
            # real-time flow
            if self.updating_data_flag is True:
                if idx in monitor_unity_patterns[-3:]:  # plant outlet-pipe flow
                    self.new_flow_data[idx][0] = self.current_time_download_data[monitor_patterns_id[idx]]
                else:  # all other flows: scale recorded history by the regional factor
                    self.new_flow_data[idx][0] \
                        = self.region_demand_control_factor[region_name] * self.old_flow_data[idx][0]
                # if idx == 'ZiYunTai':
                #     idx_a, idx_b = monitor_patterns_id[idx].split(',')
                #     self.new_flow_data[idx][0] \
                #         = self.current_time_download_data[idx_a] - self.current_time_download_data[idx_b]
                # else:
                #     self.new_flow_data[idx][0] = self.current_time_download_data[monitor_patterns_id[idx]]
                # for data_id in monitor_patterns_id[idx].split(','):
                #     if (self.current_time_download_data_flag[data_id] is False) \
                #             and (idx not in [pipe for pipe_list in regions_demand_patterns.values()
                #                              for pipe in pipe_list]):  # no real-time data available
                #         self.new_flow_data[idx][0] \
                #             = self.region_demand_control_factor[region_name] * self.old_flow_data[idx][0]
                #         break
            # rescale the new flows when demand is user-set
            if (self.region_demand_control is not None) \
                    and (region_name in self.region_demand_control.keys()):
                for pattern_idx in self.pattern_index_list:
                    if pattern_idx == self.pattern_start_index:  # starting moment
                        self.new_flow_data[idx][0] \
                            = self.region_demand_control_factor[region_name] * self.old_flow_data[idx][0]
                    else:
                        self.new_flow_data[idx].append(
                            self.region_demand_control_factor[region_name]
                            * self.old_flow_data[idx][self.pattern_index_list.index(pattern_idx)]
                        )

    def set_new_pattern_factor(self):
        """Recompute pattern factors for the selected period (user-set demand) or moment."""
        # with a user-set demand the whole period is updated, otherwise only the start slot
        pattern_index_list = self.pattern_index_list \
            if self.region_demand_control is not None \
            else [self.pattern_start_index]
        for idx in monitor_patterns:  # iterate over patterns
            for pattern_idx in pattern_index_list:  # iterate over pattern indices to modify
                pattern_idx_cls = pattern_index_list.index(pattern_idx)  # position within this object's list storage
                old_flow_data = self.old_flow_data[idx][pattern_idx_cls]
                old_pattern_factor = self.old_pattern_factor[idx][pattern_idx_cls]
                if pattern_idx_cls == 0:  # starting moment
                    if idx in monitor_single_patterns:
                        if not np.isnan(self.new_flow_data[idx][0]):
                            self.new_pattern_factor[idx][0] = (self.new_flow_data[idx][0] * 1000 / 3600)  # m3/h to L/s
                    if idx in monitor_unity_patterns:
                        if not np.isnan(self.new_flow_data[idx][0]):
                            self.new_pattern_factor[idx][0] \
                                = old_pattern_factor * self.new_flow_data[idx][0] / old_flow_data
                else:
                    if idx in monitor_single_patterns:
                        if len(self.new_flow_data[idx]) > pattern_idx_cls:
                            self.new_pattern_factor[idx].append(
                                (self.new_flow_data[idx][pattern_idx_cls] * 1000 / 3600))  # m3/h to L/s
                    if idx in monitor_unity_patterns:
                        if len(self.new_flow_data[idx]) > pattern_idx_cls:
                            self.new_pattern_factor[idx].append(
                                old_pattern_factor
                                * self.new_flow_data[idx][pattern_idx_cls]
                                / old_flow_data)

    def set_reservoirs(self):
        """Set clear-water reservoir levels (gauge reading plus base elevation)."""
        if self.updating_data_flag is True:
            for idx in self.reservoir_data.keys():
                if self.current_time_download_data_flag[reservoirs_id[idx]] is False:  # no real-time data
                    print('There is no current data of reservoir: {}.'.format(idx))
                else:
                    self.reservoir_data[idx] \
                        = self.current_time_download_data[reservoirs_id[idx]] + RESERVOIR_BASIC_HEIGHT

    def set_tanks(self):
        """Set regulating-tank levels from real-time data; user control takes precedence."""
        for idx in self.tank_data.keys():
            if self.updating_data_flag is True:
                if self.current_time_download_data_flag[tanks_id[idx]] is False:  # no real-time data
                    print('There is no current data of tank: {}.'.format(idx))
                else:
                    self.tank_data[idx] = self.current_time_download_data[tanks_id[idx]]
            # an explicit user-set initial level overrides any downloaded value
            self.tank_data[idx] = self.tank_initial_level_control[idx] \
                if (self.tank_initial_level_control is not None) and (idx in self.tank_initial_level_control) \
                else self.tank_data[idx]

    def set_pumps(self):
        """Set pump frequency series, merging real-time data with user control."""
        for idx in self.pump_data.keys():
            if self.updating_data_flag is True:
                if self.current_time_download_data_flag[pumps_id[idx]] is False:  # no real-time data
                    print('There is no current data of pump: {}.'.format(idx))
                    if (self.pump_control is not None) and (idx in self.pump_control.keys()):
                        self.pump_data[idx] = self.pump_control[idx]
                else:
                    self.pump_data[idx] = [self.current_time_download_data[pumps_id[idx]]]
                    if (self.pump_control is not None) and (idx in self.pump_control.keys()):
                        # a short control list extends the measured value; a full one replaces it
                        self.pump_data[idx] = self.pump_data[idx] + self.pump_control[idx] \
                            if len(self.pump_control[idx]) < len(self.pattern_index_list) \
                            else self.pump_control[idx]  # user pump setting
            else:
                if (self.pump_control is not None) and (idx in self.pump_control.keys()):
                    self.pump_data[idx] = self.pump_control[idx]
            # variable-frequency pumps: convert frequency (Hz) to a relative speed (/50)
            self.pump_data[idx] \
                = list(np.array(self.pump_data[idx]) / 50) \
                if idx in variable_frequency_pumps else self.pump_data[idx]

    def set_valves(self):
        """Set valves (not implemented yet)."""
        pass

    def download_real_data(self, ids: str):
        """Fetch real-time values for `ids` and mark each value's freshness.

        Raises:
            Exception: the newest record is stale but within one pattern step,
                so the caller should wait and retry.
            ConnectionError: the HTTP request itself failed.

        Returns:
            datetime: the newest timestamp among the downloaded records.
        """
        # data-service endpoint
        global url_real
        # GET parameters
        params = {'ids': ids}
        # issue the GET request
        response = requests.get(url_real, params=params)
        # status code 200 means success
        if response.status_code == 200:
            newest_data_time = None  # newest timestamp among the downloaded records
            # parse the JSON payload
            data = response.json()
            for realValue in data:  # one record per id
                data_time = convert_utc_to_bj(realValue['datadt'])  # datetime
                self.current_time_download_data[str(realValue['id'])] \
                    = float(realValue['realValue'])  # {id(str): value(float)}
                if data_time > self.current_round_time.replace(tzinfo=None) - timedelta(minutes=5):  # record is fresh
                    self.current_time_download_data_flag[str(realValue['id'])] = True
                if newest_data_time is None:
                    newest_data_time = data_time
                else:
                    newest_data_time = data_time if data_time > newest_data_time else newest_data_time  # keep the newest
            if newest_data_time <= self.current_round_time.replace(tzinfo=None) - timedelta(minutes=5):  # newest record is stale
                warning_text = 'There is no current data with newest timestamp: {}.'.format(
                    newest_data_time.strftime('%Y-%m-%d %H:%M:%S'))
                delta_time = self.current_round_time.replace(tzinfo=None) - newest_data_time
                if delta_time < timedelta(minutes=PATTERN_TIME_STEP):  # close enough: wait and re-download
                    raise Exception(warning_text)
                else:
                    print(warning_text)
                    self.updating_data_flag = False
            else:
                for idx in monitor_unity_patterns[-3:]:  # plant outlet-pipe flows
                    if self.current_time_download_data_flag[monitor_patterns_id[idx]] is False:  # outlet flow is not fresh
                        print('There is no current data of outflow: {}.'.format(idx))
                        self.updating_data_flag = False
            if self.updating_data_flag is False:
                print('Abandon updating data with downloaded data.')
            return newest_data_time
        else:
            # request failed: report the status code
            print("请求失败,状态码:", response.status_code)
            raise ConnectionError('Cannot download data.')

    @ staticmethod
    def init_dict_of_list(dict_of_list):
        """Re-copy each list value so entries do not all share one list object."""
        for idx in dict_of_list.keys():
            dict_of_list[idx] = dict_of_list[idx].copy()
        return dict_of_list

    @ staticmethod
    def get_download_id():
        """Build the comma-joined, de-duplicated id string for the download API."""
        # id_list = (list(monitor_single_patterns_id.values())
        #            + list(monitor_unity_patterns_id.values())
        #            + list(tanks_id.values())
        #            + list(reservoirs_id.values())
        #            + list(pumps_id.values()))
        id_list = (list(monitor_unity_patterns_id.values())[-3:]
                   + list(tanks_id.values())
                   + list(reservoirs_id.values())
                   + list(pumps_id.values()))
        id_list = sorted(set(id_list), key=id_list.index)  # de-duplicate, preserving first-seen order
        if None in id_list:
            id_list.remove(None)
        return ','.join(id_list)

    @ staticmethod
    def get_history_pattern_info(project_name, pattern_name):
        """Read the saved history pattern info (flow, factor) for the selected pattern."""
        factors_list = []
        flow_list = []
        patterns_info = read_all(project_name,
                                 f"select * from history_patterns_flows where id = '{pattern_name}' order by _order")
        for item in patterns_info:
            flow_list.append(float(item['flow']))
            factors_list.append(float(item['factor']))
        return flow_list, factors_list

    @ staticmethod
    def judge_time(current_time, time_index_list):
        """Return current_time's position in time_index_list, or None when absent."""
        current_index \
            = time_index_list.index(current_time) if (current_time in time_index_list) else None
        return current_index

    @staticmethod
    def get_time_index_list(start_time: datetime, end_time: datetime, step: int):
        """Generate datetimes from start_time to end_time (inclusive) every `step` minutes."""
        time_index_list = []  # time index list (original comment said [str]; values are datetime objects)
        time_index = start_time
        while time_index <= end_time:
            time_index_list.append(time_index)
            time_index += timedelta(minutes=step)
        return time_index_list

    @ staticmethod
    def round_time(time_: datetime, interval=5):
        """Floor a (Beijing) time down to a whole multiple of `interval` minutes."""
        # return datetime.fromtimestamp(round(time_.timestamp() / (60 * interval)) * (60 * interval))
        return datetime.fromtimestamp(int((time_.timestamp()) // (60 * interval)) * (60 * interval))
|
||||
|
||||
|
||||
def convert_utc_to_bj(utc_time_str):
    """Convert a UTC timestamp string into naive Beijing local time.

    Beijing (Asia/Shanghai) has been fixed at UTC+8 with no daylight-saving
    time since 1991, so a constant offset is equivalent to the previous
    pytz-based conversion while avoiding building two timezone objects on
    every call (this runs once per record in download_real_data).

    Args:
        utc_time_str: UTC time formatted '%Y-%m-%dT%H:%M:%SZ',
            e.g. '2024-01-01T00:00:00Z'.

    Returns:
        datetime: naive datetime in Beijing local time (UTC+8).
    """
    # parse the UTC string, then shift by the fixed +8h offset
    utc_time = datetime.strptime(utc_time_str, '%Y-%m-%dT%H:%M:%SZ')
    return utc_time + timedelta(hours=8)
|
||||
|
||||
|
||||
def get_datetime(cur_datetime: str):
    """Parse a '%Y-%m-%d %H:%M:%S' string into a datetime object."""
    return datetime.strptime(cur_datetime, "%Y-%m-%d %H:%M:%S")
|
||||
|
||||
|
||||
def get_strftime(cur_datetime: datetime):
    """Format a datetime object as a '%Y-%m-%d %H:%M:%S' string."""
    return cur_datetime.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
|
||||
def step_time(cur_datetime: str, step=5):
    """Shift a '%Y-%m-%d %H:%M:%S' timestamp forward by `step` minutes.

    Args:
        cur_datetime: timestamp string to shift.
        step: minutes to add (default 5).

    Returns:
        str: the shifted timestamp in the same format.
    """
    time_format = "%Y-%m-%d %H:%M:%S"
    shifted = datetime.strptime(cur_datetime, time_format) + timedelta(minutes=step)
    return shifted.strftime(time_format)
|
||||
|
||||
|
||||
def get_pattern_index(cur_datetime: str) -> int:
    """Map a '%Y-%m-%d %H:%M:%S' timestamp to its 0-based pattern slot index.

    Slot width is PATTERN_TIME_STEP minutes; with a 15-minute step,
    00:00-00:14 -> 0, 00:15-00:29 -> 1, and so on.
    """
    parsed = datetime.strptime(cur_datetime, "%Y-%m-%d %H:%M:%S")
    minutes_since_midnight = parsed.hour * 60 + parsed.minute
    return int(minutes_since_midnight / PATTERN_TIME_STEP)
|
||||
|
||||
def get_pattern_index_str(cur_datetime: str) -> str:
    """Return the 'HH:MM:00' wall-clock start of the pattern slot containing cur_datetime."""
    slot = get_pattern_index(cur_datetime)
    # split the slot's hour value into fractional (minutes) and whole parts
    frac_hour, whole_hour = modf(slot * PATTERN_TIME_STEP / 60)
    hour_text = str(int(whole_hour)).zfill(2)
    minute_text = str(int(frac_hour * 60)).zfill(2)
    return '{}:{}:00'.format(hour_text, minute_text)
|
||||
|
||||
|
||||
def from_seconds_to_clock(secs: int) -> str:
    """Format a second count as an 'HH:MM:SS' clock string."""
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    return ':'.join(str(part).zfill(2) for part in (hours, minutes, seconds))
|
||||
|
||||
def from_clock_to_seconds(clock: str) -> int:
    """Seconds since midnight of the time-of-day part of a '%Y-%m-%d %H:%M:%S' string."""
    parsed = datetime.strptime(clock, "%Y-%m-%d %H:%M:%S")
    return parsed.hour * 3600 + parsed.minute * 60 + parsed.second
|
||||
|
||||
def from_clock_to_seconds_2(clock: str) -> int:
    """Seconds since midnight for an 'HH:MM:SS' clock string."""
    parsed = datetime.strptime(clock, "%H:%M:%S")
    return parsed.hour * 3600 + parsed.minute * 60 + parsed.second
|
||||
|
||||
|
||||
def from_clock_to_seconds_3(clock: str) -> int:
    """Seconds since midnight for an 'HH:MM' clock string.

    With the '%H:%M' format the parsed seconds are always zero, so only the
    hour and minute contribute. (Fixed: the original assigned `dt.second`
    to an unused local — dead code removed.)

    Args:
        clock: time string such as '01:30'.

    Returns:
        int: hour * 3600 + minute * 60.
    """
    dt = datetime.strptime(clock, "%H:%M")
    return dt.hour * 3600 + dt.minute * 60
|
||||
|
||||
|
||||
def trim_time_flag(url_date_time: str) -> str:
    """Convert a URL-style datetime into a plain one.

    'XXXX-XX-XXT00:00:00Z' -> 'XXXX-XX-XX 00:00:00' (drops the 'T' separator
    and the trailing 'Z' flag).
    """
    return url_date_time.replace('T', ' ').replace('Z', '')
|
||||
|
||||
# Single-time-step simulation
def run_simulation(name:str,start_datetime:str,end_datetime:str=None, duration:int=900)->str:
    """Run one hydraulic simulation step for the given project.

    Re-opens the project, normalizes the start/end timestamps, aligns the
    model's PATTERN START to the slot containing start_datetime, sets the
    DURATION, and runs the project.

    Args:
        name: project/database name.
        start_datetime: start time, 'XXXX-XX-XXTHH:MM:SSZ' or 'XXXX-XX-XX HH:MM:SS'.
        end_datetime: optional end time in the same formats.
        duration: simulation duration in seconds (default 900 = one 15-minute step);
            None falls back to the model's HYDRAULIC TIMESTEP.

    Returns:
        str: the JSON result string produced by run_project
            (keys include simulation_result, output, report).
    """
    # ensure a fresh project handle
    if(is_project_open(name)):
        close_project(name)
    open_project(name)
    #get_current_data(cur_datetime)
    #extract the patternindex from datetime
    #e.g. 0: the first time step for 00:00-00:14; 1: the second step for 00:15-00:30
    start_datetime=trim_time_flag(start_datetime)
    if(end_datetime!=None):
        end_datetime=trim_time_flag(end_datetime)

    ## redistribute the basedemand according to the currentTotalQ and the base_totalQ
    # step 1. get_real _data
    # default the end to one step after the start
    if end_datetime==None or start_datetime==end_datetime:
        end_datetime=step_time(start_datetime)

    # # ids=['2498','3854','3853','2510','2514','4780','4854']
    # # real_data=get_real_data(ids,start_datetime,end_datetime)
    # print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S")+"--获取实时数据完毕\n")
    # #step 2. re-distribute the real q to base demand of the node region_sa by region_sa
    # regions=get_all_service_area_ids(name)
    # total_demands={}
    # for region in regions:
    #     total_demands[region]=get_total_base_demand(name,region)
    # region_demand_factor={}
    # #Region_ID:SA_ZBBDJSCP000002 高区;SA_R00003+SA_ZBBDTJSC000001 低区
    # H_region_real_demands=real_data[DN_900_ID][start_datetime]+real_data[DN_500_ID][start_datetime]
    # L_region_real_demands=real_data[DN_1000_ID][start_datetime]
    # factor_H_zone=H_region_real_demands/total_demands[H_REGION_1]/3.6 #3.6: m3/h->L/s
    # factor_L_zone=L_region_real_demands/(total_demands[L_REGION_1]+total_demands[L_REGION_2])/3.6
    # print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S")+"--流量因子计算完毕完毕\n")
    # for region in regions:
    #     region_nodes=get_nodes_in_region(name,region)
    #     factor=1
    #     if region==H_REGION_1 or H_REGION_2:
    #         factor=factor_H_zone
    #     else:
    #         factor=factor_L_zone
    #
    #     for node in region_nodes:
    #         d=get_demand(name,node)
    #         for r in d['demands']:
    #             r['demand']=factor*r['demand']
    #         cs=ChangeSet()
    #         cs.append(d)
    #         set_demand(name,cs)
    #
    #     #
    #     #
    # print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S")+"--节点流量重分配完毕\n")

    #step 3. set pattern index to the current time,and set duration to 300 secs
    #
    str_pattern_start=get_pattern_index_str(start_datetime)
    dic_time=get_time(name)
    dic_time['PATTERN START']=str_pattern_start

    if duration !=None:
        dic_time['DURATION']=from_seconds_to_clock(duration)
    else:
        dic_time['DURATION']=dic_time['HYDRAULIC TIMESTEP']
    cs=ChangeSet()
    cs.operations.append(dic_time)
    set_time(name,cs)

    # step4. run simulation and save the result to name-time.out for download
    #inp_file = 'inp\\'+name+'.inp'
    #db_name=name
    #dump_inp(db_name,inp_file,'2')
    # result=run_inp(db_name)
    result=run_project(name)
    #json string format
    # simulation_result, output, report
    result_data=json.loads(result)
    #print(result_data['simulation_result'])
    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S")+'run finished successfully\n')
    #print(result_data['report'])
    return result
|
||||
|
||||
|
||||
# Online simulation entry point
def run_simulation_ex(name: str, simulation_type: str, start_datetime: str,
                      end_datetime: str = None, duration: int = 0,
                      pump_control: dict[str, list] = None, tank_initial_level_control: dict[str, float] = None,
                      region_demand_control: dict[str, float] = None, valve_control: dict[str, dict] = None,
                      downloading_prohibition: bool = False) -> str:
    """Run an online hydraulic simulation for the given project.

    :param name: project (database) name
    :param simulation_type: 'realtime' (modifies the original database) or
        'extended' (works on a throwaway copy named '<name>_c')
    :param start_datetime: simulation start, ISO-like string with a trailing time flag
    :param end_datetime: optional simulation end; when None, ``duration`` is used instead
    :param duration: simulation length in seconds (used when ``end_datetime`` is None)
    :param pump_control: {pump display name: frequency or [frequencies, ...]} overrides
    :param tank_initial_level_control: {tank name: initial level} overrides
    :param region_demand_control: {region: demand} overrides
    :param valve_control: {valve name: {'status'/'setting'/'k': value}} overrides
    :param downloading_prohibition: when True, the DataLoader skips remote data downloads
    :return: JSON string produced by ``run_project``
    """
    time_cost_start = time.perf_counter()
    print('{} -- Hydraulic simulation started.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')))

    if is_project_open(name):
        close_project(name)

    if simulation_type.upper() == 'REALTIME':  # realtime: modify the original database
        name_c = name
    elif simulation_type.upper() == 'EXTENDED':  # extended: work on a copy of the database
        name_c = '_'.join([name, 'c'])
        if have_project(name_c):
            if is_project_open(name_c):
                close_project(name_c)
            delete_project(name_c)
        copy_project(name, name_c)  # back up the project into the working copy
    else:
        raise Exception('Incorrect simulation type, choose in (realtime, extended)')

    open_project(name_c)

    # Time handling:
    # extract the pattern index from datetime
    # e.g. 0: the first time step for 00:00-00:14; 1: the second step for 00:15-00:30
    # start_datetime = get_strftime(convert_utc_to_bj(start_datetime))
    start_datetime = trim_time_flag(start_datetime)
    if end_datetime is not None:
        # end_datetime = get_strftime(convert_utc_to_bj(end_datetime))
        end_datetime = trim_time_flag(end_datetime)

    # Normalise pump control input: map display names to internal pump ids and
    # wrap scalar frequencies into single-element lists.
    if pump_control is not None:
        for key in list(pump_control.keys()):
            pump_control[key] = [pump_control[key]] if type(pump_control[key]) is not list else pump_control[key]
            pump_control[pumps[pumps_name.index(key)]] = pump_control.pop(key)

    # Reassign node demands:
    # 1) (single) base_demand_new = 1, pattern_new = real data
    # 2) (unity)  base_demand_new = base_demand_old,
    #             pattern_new = factor * pattern_old (factor = flow_new / flow_old)
    # Demand data sources:
    # a) historical pattern flows (read from the saved database)
    # b) realtime flows (downloaded through the data interface)

    # (dead experiment) set node demand = 1 and fold the base demand into the
    # pattern for monitored single-pattern nodes:
    # nodes = get_nodes(name_c)  # nodes
    # for node_name in nodes:
    #     demands_dict = get_demand(name_c, node_name)  # {'demands':[{'demand':, 'pattern':}]}
    #     for demands in demands_dict['demands']:
    #         if (demands['pattern'] in monitor_single_patterns) and (demands['demand'] != 1):  # 1)
    #             pattern = get_pattern(name_c, demands['pattern'])
    #             pattern['factors'] = list(demands['demand'] * np.array(pattern['factors']))  # scale pattern
    #             cs = ChangeSet()
    #             cs.append(pattern)
    #             set_pattern(name_c, cs)
    #             demands_dict['demands'][
    #                 demands_dict['demands'].index(demands)
    #             ]['demand'] = 1  # reset base demand
    #             cs = ChangeSet()
    #             cs.append(demands_dict)
    #             set_demand(name_c, cs)

    start_time = get_datetime(start_datetime)  # datetime
    end_time = get_datetime(end_datetime) \
        if end_datetime is not None \
        else get_datetime(start_datetime) + timedelta(seconds=duration)  # datetime
    # modify_pattern_start_index = get_pattern_index(start_datetime)

    # Instantiate the data loader that fetches pattern/tank/reservoir/pump data.
    dataset_loader = DataLoader(project_name=name_c,
                                start_time=start_time, end_time=end_time,
                                pumps_control=pump_control, tank_initial_level_control=tank_initial_level_control,
                                region_demand_control=region_demand_control,
                                downloading_prohibition=downloading_prohibition)
    # modify_index: first pattern-factor index to overwrite (None when nothing
    # besides pumps/tanks needs modification — see DataLoader).
    modify_index \
        = dataset_loader.load_data()
    # {pattern name: [factor, ...]} — realtime: refreshed; otherwise kept/overridden.
    new_patterns \
        = dataset_loader.new_pattern_factor
    tank_init_level = dataset_loader.tank_data  # {tank name: initial level}
    reservoir_level = dataset_loader.reservoir_data  # {reservoir name: level}
    pump_freq = dataset_loader.pump_data  # {pump name: [frequency, ...]}
    print(datetime.now(pytz.timezone('Asia/Shanghai')).strftime("%Y-%m-%d %H:%M:%S") + " -- Loading data ok.\n")

    pattern_name_list = get_patterns(name_c)  # all patterns; modified ones are removed below
    # (dead experiment) per-node pattern/demand rewrite:
    # nodes = get_nodes(name_c)  # nodes
    # for node_name in nodes:
    #     demands_dict = get_demand(name_c, node_name)  # {'demands':[{'demand':, 'pattern':}]}
    #     for demands in demands_dict['demands']:
    #         if demands['pattern'] in monitor_single_patterns:  # 1)
    #             demands_dict['demands'][
    #                 demands_dict['demands'].index(demands)
    #             ]['demand'] = 1  # reset base demand
    #             pattern = get_pattern(name_c, demands['pattern'])
    #             pattern['factors'][modify_index] = flow_new[demands['pattern']]  # overwrite pattern
    #             cs = ChangeSet()
    #             cs.append(pattern)
    #             set_pattern(name_c, cs)
    #             if demands['pattern'] in pattern_name_list:
    #                 pattern_name_list.remove(demands['pattern'])  # mark pattern as handled
    #         else:  # 2)
    #             continue
    #     cs = ChangeSet()
    #     cs.append(demands_dict)
    #     set_demand(name_c, cs)
    # Overwrite the monitored demand patterns with the freshly loaded factors.
    for pattern_name in monitor_patterns:
        if not np.isnan(new_patterns[pattern_name][0]):
            pattern = get_pattern(name_c, pattern_name)
            pattern['factors'][modify_index:
                               modify_index + len(new_patterns[pattern_name])] \
                = new_patterns[pattern_name]
            cs = ChangeSet()
            cs.append(pattern)
            set_pattern(name_c, cs)
            if pattern_name in pattern_name_list:
                pattern_name_list.remove(pattern_name)  # mark pattern as handled

    # Overwrite the reservoir (clear-water basin) level patterns.
    for reservoir_name in reservoirs:
        if (not np.isnan(reservoir_level[reservoir_name])) and (reservoir_level[reservoir_name] != 0):
            reservoir_pattern = get_pattern(name_c, get_reservoir(name_c, reservoir_name)['pattern'])
            reservoir_pattern['factors'][modify_index] = reservoir_level[reservoir_name]
            cs = ChangeSet()
            cs.append(reservoir_pattern)
            set_pattern(name_c, cs)
            if reservoir_pattern['id'] in pattern_name_list:
                pattern_name_list.remove(reservoir_pattern['id'])  # mark pattern as handled

    # Overwrite the initial levels of the regulating tanks.
    for tank_name in tanks:
        if (not np.isnan(tank_init_level[tank_name])) and (tank_init_level[tank_name] != 0):
            tank = get_tank(name_c, tank_name)
            tank['init_level'] = tank_init_level[tank_name]
            cs = ChangeSet()
            cs.append(tank)
            set_tank(name_c, cs)

    # Overwrite the pump (frequency) patterns.
    for pump_name in pumps:
        if not np.isnan(pump_freq[pump_name][0]):
            pump_pattern = get_pattern(name_c, get_pump(name_c, pump_name)['pattern'])
            pump_pattern['factors'][modify_index
                                    :modify_index + len(pump_freq[pump_name])] \
                = pump_freq[pump_name]
            cs = ChangeSet()
            cs.append(pump_pattern)
            set_pattern(name_c, cs)
            if pump_pattern['id'] in pattern_name_list:
                pattern_name_list.remove(pump_pattern['id'])  # mark pattern as handled
    # Apply valve status / setting overrides.
    if valve_control is not None:
        for valve in valve_control.keys():
            status = get_status(name_c, valve)
            if 'status' in valve_control[valve].keys():
                status['status'] = valve_control[valve]['status']
            if 'setting' in valve_control[valve].keys():
                status['setting'] = valve_control[valve]['setting']
            if 'k' in valve_control[valve].keys():
                valve_k = valve_control[valve]['k']
                if valve_k == 0:
                    status['status'] = 'CLOSED'
                else:
                    # Empirical conversion from opening degree k to valve setting.
                    status['setting'] = 0.1036 * pow(valve_k, -3.105)
            cs = ChangeSet()
            cs.append(status)
            set_status(name_c, cs)

    print('Finish demands amending, unmodified patterns: {}.'.format(pattern_name_list))

    # Update the time options: pattern start aligned to the pattern time step.
    str_pattern_start = get_pattern_index_str(
        DataLoader.round_time(start_time, int(PATTERN_TIME_STEP)).strftime("%Y-%m-%d %H:%M:%S"))
    dic_time = get_time(name_c)
    dic_time['PATTERN START'] = str_pattern_start

    if duration is not None:
        # NOTE(review): `duration` defaults to 0 and is never None, so this branch
        # is always taken; with only end_datetime supplied DURATION becomes 0:00
        # instead of HYDRAULIC TIMESTEP — confirm intended behaviour.
        dic_time['DURATION'] = from_seconds_to_clock(duration)
    else:
        dic_time['DURATION'] = dic_time['HYDRAULIC TIMESTEP']
    cs = ChangeSet()
    cs.operations.append(dic_time)
    set_time(name_c, cs)

    # Run the simulation and return its result.
    result = run_project(name_c)

    time_cost_end = time.perf_counter()
    print('{} -- Hydraulic simulation finished, cost time: {:.2f} s.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'),
        time_cost_end - time_cost_start))

    close_project(name_c)

    return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: run an extended simulation on a copy of the project.
    # Optional overrides (end_datetime / duration / pump_control /
    # region_demand_control / ...) can be added to the kwargs below.
    simulation_kwargs = {
        'name': project_info.name,
        'simulation_type': 'extended',
        'start_datetime': '2024-11-09T02:30:00Z',
    }
    run_simulation_ex(**simulation_kwargs)
|
||||
654
app/algorithms/api_ex/sensitivity.py
Normal file
654
app/algorithms/api_ex/sensitivity.py
Normal file
@@ -0,0 +1,654 @@
|
||||
# 改进灵敏度法
|
||||
import networkx
|
||||
import numpy as np
|
||||
import pandas
|
||||
import wntr
|
||||
import pandas as pd
|
||||
import copy
|
||||
import matplotlib.pyplot as plt
|
||||
import networkx as nx
|
||||
from sklearn.cluster import KMeans
|
||||
from wntr.epanet.toolkit import EpanetException
|
||||
from numpy.linalg import slogdet
|
||||
import random
|
||||
from app.services.tjnetwork import *
|
||||
from matplotlib.lines import Line2D
|
||||
from sklearn.cluster import SpectralClustering
|
||||
import libpysal as ps
|
||||
from spopt.region import Skater
|
||||
from shapely.geometry import Point
|
||||
import geopandas as gpd
|
||||
from sklearn.metrics import pairwise_distances
|
||||
import app.services.project_info as project_info
|
||||
|
||||
# 2025/03/12
|
||||
# Step1: 获取节点坐标
|
||||
def getCoor(wn: "wntr.network.WaterNetworkModel") -> "pandas.DataFrame":
    """Return the x/y coordinates of every node in the network model.

    :param wn: water network model created by wntr
    :return: DataFrame indexed by node name with float columns 'x' and 'y'
    """
    # Series: node name -> coordinate tuple (x, y), ordered like wn.node_name_list.
    site = wn.query_node_attribute('coordinates')
    # Split the coordinate tuples into parallel column lists. Replaces the
    # original index-based range(len(...)) loop and the redundant per-element
    # numpy conversion — same values, same order.
    xy = {'x': [coord[0] for coord in site.values],
          'y': [coord[1] for coord in site.values]}
    return pd.DataFrame(xy, index=wn.node_name_list, columns=['x', 'y'])
|
||||
|
||||
|
||||
# 2025/03/12
|
||||
# Step2: KMeans 聚类
|
||||
# 将节点用kmeans根据坐标分为k组,存入字典g
|
||||
def kgroup(coor: pandas.DataFrame, knum: int) -> dict[int, list[str]]:
    """Partition network nodes into ``knum`` spatial groups with KMeans.

    :param coor: node coordinates, indexed by node name
    :param knum: number of clusters to form
    :return: {cluster id: list of node names in that cluster}
    """
    clusterer = KMeans(n_clusters=knum)
    clusterer.fit(coor)
    labels = clusterer.labels_
    # Collect the node names that fall under each cluster label.
    return {cluster_id: coor[labels == cluster_id].index.tolist()
            for cluster_id in range(knum)}
|
||||
|
||||
|
||||
def skater_partition(G, n_clusters):
    """Partition an undirected graph into spatially contiguous regions via SKATER.

    Each resulting region is connected in the graph-theoretic sense; the split
    is driven by the nodes' spatial coordinates.

    :param G: networkx.Graph whose nodes carry a 'pos' (x, y) attribute
    :param n_clusters: desired number of regions
    :return: {region label: list of nodes in that region}
    """
    # 1. Node coordinates, one row [x, y] per node (requires the 'pos' attribute).
    node_list = list(G.nodes())
    positions = nx.get_node_attributes(G, 'pos')
    coord_array = np.array([positions[node] for node in node_list])

    # 2. GeoDataFrame with a shapely Point geometry per node.
    frame = pd.DataFrame(coord_array, columns=['x', 'y'], index=node_list)
    frame['geometry'] = frame.apply(lambda row: Point(row['x'], row['y']), axis=1)
    geo_frame = gpd.GeoDataFrame(frame, geometry='geometry')

    # 3. Spatial weights via 4-nearest-neighbours (tune k as needed), row-standardised.
    weights = ps.weights.KNN.from_array(coord_array, k=4)
    weights.transform = 'R'

    # 4. Run SKATER; the current API takes (gdf, w, attrs_name, n_clusters).
    model = Skater(geo_frame, weights, attrs_name=['x', 'y'], n_clusters=n_clusters)
    model.solve()

    # 5. Group nodes by their cluster label.
    partition = {}
    for label, node in zip(model.labels_, node_list):
        partition.setdefault(label, []).append(node)

    return partition
|
||||
|
||||
|
||||
def spectral_partition(G, n_clusters):
    """Partition graph nodes by spectral clustering on their spatial coordinates.

    Builds a Gaussian-kernel similarity matrix from pairwise Euclidean
    distances and applies normalised-cut spectral clustering.

    :param G: networkx.Graph whose nodes carry a 'pos' (x, y) attribute
    :param n_clusters: desired number of clusters
    :return: {cluster label: list of nodes in that cluster}
    """
    # 1. Node coordinates (every node must expose a 'pos' attribute).
    node_list = list(G.nodes())
    positions = nx.get_node_attributes(G, 'pos')
    coord_array = np.array([positions[node] for node in node_list])

    # 2. Pairwise Euclidean distance matrix.
    dist = pairwise_distances(coord_array, metric='euclidean')

    # 3. Kernel bandwidth: the mean pairwise distance (adjust if needed).
    sigma = np.mean(dist)

    # 4. Gaussian similarity: A(i, j) = exp(-d(i, j)^2 / (2 * sigma^2)).
    affinity = np.exp(- (dist ** 2) / (2 * sigma ** 2))

    # 5. Normalised-cut spectral clustering on the precomputed affinity.
    model = SpectralClustering(n_clusters=n_clusters,
                               affinity='precomputed',
                               random_state=0)
    labels = model.fit_predict(affinity)

    # 6. Group nodes by their cluster label.
    partition = {}
    for label, node in zip(labels, node_list):
        partition.setdefault(label, []).append(node)

    return partition
|
||||
|
||||
# 2025/03/12
|
||||
# Step3: wn_func类,水力计算
|
||||
# wn_func 主要用于计算:
|
||||
# 水力距离(hydraulic length):即节点之间的水力阻力。
|
||||
# 灵敏度分析(sensitivity analysis):用于优化测压点的布置。
|
||||
# 一些与水力相关的函数,包括 CtoS:求水力距离,stafun:求状态函数F
|
||||
# # diff:求F对P的导数,返回灵敏度矩阵A
|
||||
# # sensitivity:返回灵敏度和总灵敏度
|
||||
class wn_func(object):
    """Hydraulic helper built on top of a wntr model.

    Runs one EPANET simulation on construction and provides:
    - CtoS():     hydraulic-distance matrix between kept junctions
    - get_Conn(): node/pipe incidence matrices (pumps/valves excluded)
    - Jaco():     pressure sensitivity w.r.t. pipe roughness, weighted by
                  hydraulic distance
    """

    # Step 3.1: initialisation
    def __init__(self, wn: wntr.network.WaterNetworkModel, min_diameter: int):
        """
        Collect network information from the model.

        :param wn: water network model created by wntr
        :param min_diameter: smallest pipe diameter eligible for sensor installation
        """
        # Full EPANET simulation results (pressure, flow, head, ...).
        self.results = wntr.sim.EpanetSimulator(wn).run_sim()
        self.wn = wn
        # Pipe flow rates: DataFrame indexed by time step, columns are link names.
        self.q = self.results.link['flowrate']
        # Reservoir / tank node name lists.
        ReservoirIndex = wn.reservoir_name_list
        Tankindex = wn.tank_name_list
        # All pipe / node names (pipes are filtered further below).
        self.pipes = wn.pipe_name_list
        self.nodes = wn.node_name_list
        # Node coordinates: Series of (x, y) tuples indexed by node name.
        self.coordinates = wn.query_node_attribute('coordinates')
        allpumps = wn.pump_name_list
        allvalves = wn.valve_name_list
        # Start / end node names of pumps and valves.
        pumpstnode = []
        pumpednode = []
        valvestnode = []
        valveednode = []
        # Pipes attached to a reservoir/tank, and the junctions on their far end.
        Reservoirpipe = []
        Reservoirednode = []
        for pump in allpumps:
            pumpstnode.append(wn.links[pump].start_node.name)
            pumpednode.append(wn.links[pump].end_node.name)
        for valve in allvalves:
            valvestnode.append(wn.links[valve].start_node.name)
            valveednode.append(wn.links[valve].end_node.name)
        for pipe in self.pipes:
            if wn.links[pipe].start_node.name in ReservoirIndex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].start_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].end_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].start_node.name)
        # NOTE(review): a pipe whose *end* node is a reservoir (not a tank) is
        # not collected above — confirm that case cannot occur in these models.
        # Nodes excluded from placement: reservoirs, tanks, pump/valve endpoints
        # and junctions directly attached to a reservoir/tank.
        self.delnodes = list(
            set(ReservoirIndex).union(Tankindex, pumpstnode, pumpednode, valvestnode, valveednode, Reservoirednode))
        # Links excluded: pumps, valves and the reservoir/tank connection pipes.
        self.delpipes = list(set(wn.pump_name_list).union(wn.valve_name_list).union(Reservoirpipe))
        self.pipes = [pipe for pipe in wn.pipe_name_list if pipe not in self.delpipes]
        # Lengths of the kept pipes (model length units).
        self.L = wn.query_link_attribute('length')[self.pipes].tolist()
        self.n = len(self.nodes)
        self.m = len(self.pipes)
        # Head loss of every link at the first reported time step.
        self.unit_headloss = self.results.link['headloss'].iloc[0, :].tolist()
        ##
        self.delnodes1 = list(set(ReservoirIndex).union(Tankindex))

        # Junctions touched by a pipe thinner than min_diameter (no sensor there).
        self.less_than_min_diameter_junction_list = []
        for pipe in self.pipes:
            diameter = wn.links[pipe].diameter
            if diameter < min_diameter:
                start_node = wn.links[pipe].start_node.name
                end_node = wn.links[pipe].end_node.name
                self.less_than_min_diameter_junction_list.extend([start_node, end_node])
        # Deduplicate.
        self.less_than_min_diameter_junction_list = list(set(self.less_than_min_diameter_junction_list))

    # Step 3.2: hydraulic distance
    def CtoS(self):
        """
        Build the hydraulic-distance matrix.

        Entry [a, b] is the shortest-path weight from a to b in a digraph that
        follows the simulated flow direction, with edge weight = head loss *
        pipe length. Excluded nodes are dropped from both axes.

        :return: (hydraulicL DataFrame, the weighted DiGraph used for the paths)
        """
        nodes = copy.deepcopy(self.nodes)
        pipes = self.pipes
        wn = self.wn
        n = self.n
        m = self.m
        s1 = [0] * m  # NOTE(review): unused
        q = self.q
        L = self.L
        # Node heads transposed: rows = node names, columns = time steps.
        H1 = self.results.node['head'].T
        # Absolute head difference across each kept pipe (per time step).
        hh = []
        for p in pipes:
            h1 = self.wn.links[p].start_node.name
            h1 = H1.loc[str(h1)]
            h2 = self.wn.links[p].end_node.name
            h2 = H1.loc[str(h2)]
            hh.append(abs(h1 - h2))
        hh = np.array(hh)
        # Head-loss table: one row per time step, one column per pipe.
        headloss = pd.DataFrame(hh, index=pipes).T
        # hf[a, b]: head loss of pipe a->b; weightL[a, b]: head loss * length.
        hf = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        weightL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        # Directed graph oriented along the simulated flow at time step 0.
        G = nx.DiGraph()
        for i in range(0, m):
            pipe = pipes[i]
            a = wn.links[pipe].start_node.name
            b = wn.links[pipe].end_node.name
            if q.loc[0, pipe] > 0:
                hf.loc[a, b] = headloss.loc[0, pipe]
                weightL.loc[a, b] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(a, b, weightL.loc[a, b])])

            else:
                hf.loc[b, a] = headloss.loc[0, pipe]
                weightL.loc[b, a] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(b, a, weightL.loc[b, a])])

        hydraulicL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)

        # Shortest weighted path length from every node that appears in G
        # (unreachable targets keep the 0 initial value).
        for a in nodes:
            if a in G.nodes:
                d = nx.shortest_path_length(G, source=a, weight='weight')
                for b in list(d.keys()):
                    hydraulicL.loc[a, b] = d[b]

        # Drop the excluded nodes from both rows and columns.
        hydraulicL = hydraulicL.drop(self.delnodes)
        hydraulicL = hydraulicL.drop(self.delnodes, axis=1)

        return hydraulicL, G

    # Step 3.3: incidence matrices
    def get_Conn(self):
        """
        Build node/pipe incidence matrices with pumps and valves excluded.

        Sets: self.A (node x pipe incidence: -1 at start node, +1 at end node),
        self.A2 (same matrix restricted to kept junctions/pipes, as np.mat),
        self.A3 (node x node adjacency) and self.junc_list (kept junction names).
        """
        m = self.wn.num_links
        n = self.wn.num_nodes
        p = self.wn.num_pumps
        v = self.wn.num_valves

        self.nonjunc_index = []
        self.non_link_index = []  # NOTE(review): never populated in this method
        for r in self.wn.reservoirs():
            self.nonjunc_index.append(r[0])
        for t in self.wn.tanks():
            self.nonjunc_index.append(t[0])
        # Node-to-pipe incidence: start node -1, end node +1.
        Conn = np.mat(np.zeros([n, m - p - v]))
        # Node-to-node adjacency: 1 when a pipe joins the two nodes.
        NConn = np.mat(np.zeros([n, n]))
        # Pipes only (wn.pipes() already excludes pumps/valves; the extra filter
        # is a defensive no-op).
        pipes = [pipe for pipe in self.wn.pipes() if pipe not in self.wn.pumps() and pipe not in self.wn.valves()]
        for pipe_name, pipe in pipes:
            start = self.wn.node_name_list.index(pipe.start_node_name)
            end = self.wn.node_name_list.index(pipe.end_node_name)
            p_index = self.wn.link_name_list.index(pipe_name)
            # NOTE(review): p_index indexes the *full* link list while Conn has
            # only m - p - v columns — confirm pipes always precede pumps/valves
            # in link_name_list, otherwise this can go out of range.
            Conn[start, p_index] = -1
            Conn[end, p_index] = 1
            NConn[start, end] = 1
            NConn[end, start] = 1
        self.A = Conn
        link_name_list = [link for link in self.wn.link_name_list if
                          link not in self.wn.pump_name_list and link not in self.wn.valve_name_list]
        self.A2 = pd.DataFrame(self.A, index=self.wn.node_name_list, columns=link_name_list)
        self.A2 = self.A2.drop(self.delnodes)
        for pipe in self.delpipes:
            if pipe not in self.wn.pump_name_list and pipe not in self.wn.valve_name_list:
                self.A2 = self.A2.drop(columns=pipe)
        self.junc_list = self.A2.index
        self.A2 = np.mat(self.A2)  # kept junction/pipe incidence as a matrix
        self.A3 = NConn

    def Jaco(self, hL: pandas.DataFrame):
        """
        Sensitivity matrix: response of junction pressures to pipe roughness,
        weighted by hydraulic distance.

        :param hL: hydraulic-distance matrix from CtoS()
        :return: SS with SS[i, j] = (total pressure sensitivity of node i)
                 * (hydraulic distance from node i to node j)
        """
        # global result
        # Junction/pipe incidence matrix prepared by get_Conn().
        A = self.A2
        wn = self.wn

        try:
            result = wntr.sim.EpanetSimulator(wn).run_sim()
        except EpanetException:
            pass
        finally:
            # NOTE(review): if run_sim() raised, `result` is unbound here
            # (NameError); the commented-out `global result` above suggests an
            # older design reused a previous run's value. Also, returning from
            # this `finally` suppresses any in-flight exception — confirm intent.
            h = result.link['headloss'][self.pipes].values[0]
            q = result.link['flowrate'][self.pipes].values[0]
            l = self.wn.query_link_attribute('length')[self.pipes]  # NOTE(review): unused
            C = self.wn.query_link_attribute('roughness')[self.pipes]
            # Head-loss array for the kept pipes.
            headloss = np.array(h)
            # Flip incidence columns so every pipe flow is treated as positive.
            for i in range(0, len(q)):
                if q[i] < 0:
                    A[:, i] = -A[:, i]
            q = np.abs(q)
            # Diagonal intermediates of the Hazen-Williams linearisation
            # (1e-10 guards against a zero head loss).
            B = np.mat(np.diag(q / ((1.852 * headloss) + 1e-10)))
            S = np.mat(np.diag(q / C))
            # Sensitivity system matrix.
            X = A * B * A.T
            try:
                det = np.linalg.det(X)
            except RuntimeError as e:
                sign, logdet = slogdet(X)  # log-determinant avoids overflow
                det = sign * np.exp(logdet)
            if det != 0:
                J_H_Cw = X.I * A * S  # head-vs-roughness Jacobian
                # J_H_Q = -X.I
                J_q_Cw = S - B * A.T * X.I * A * S  # excludes delnodes/delpipes; NOTE(review): unused
                # J_q_Q = B * A.T * X.I
            else:  # X singular: fall back to the pseudo-inverse
                J_H_Cw = np.linalg.pinv(X) @ A @ S
                # J_H_Q = -np.linalg.pinv(X)
                J_q_Cw = S - B * A.T * np.linalg.pinv(X) * A * S
                # J_q_Q = B * A.T * np.linalg.pinv(X)

            # Total (absolute) pressure sensitivity per junction.
            Sen_pressure = []
            S_pressure = np.abs(J_H_Cw).sum(axis=1).tolist()  # absolute row sums
            for ss in S_pressure:
                Sen_pressure.append(ss[0])
            # Weight each node's hydraulic distances by its total sensitivity.
            SS_pressure = copy.deepcopy(hL)  # NOTE(review): unused duplicate of SS below
            for i in range(0, len(Sen_pressure)):
                SS_pressure.iloc[i, :] = SS_pressure.iloc[i, :] * Sen_pressure[i]
            SS = copy.deepcopy(hL)
            for i in range(0, len(Sen_pressure)):
                SS.iloc[i, :] = SS.iloc[i, :] * Sen_pressure[i]
            # SS[i, j]: sensitivity of nodes[i] * hydraulic distance to nodes[j].
            return SS
|
||||
|
||||
|
||||
# 2025/03/12
|
||||
# Step4: 传感器布置优化
|
||||
# Sensorplacement
|
||||
# weight:分配权重
|
||||
# sensor:传感器布置的位置
|
||||
class Sensorplacement(wn_func):
    """
    Extends wn_func with pressure-sensor placement: picks the most sensitive
    node globally (one per spatial group) and the top node within each group.
    """
    def __init__(self, wn: wntr.network.WaterNetworkModel, sensornum: int, min_diameter: int):
        """
        :param wn: water network model created by wntr
        :param sensornum: number of sensors to place
        :param min_diameter: smallest pipe diameter eligible for a sensor
        """
        wn_func.__init__(self, wn, min_diameter=min_diameter)
        self.sensornum = sensornum

    # Two rankings are produced:
    # 1. global: a node's weighted-distance sum to all nodes
    # 2. per group: a node's weighted-distance sum to the nodes of its group
    def sensor(self, SS: pandas.DataFrame, G: networkx.Graph, group: dict[int, list[str]]):
        """
        Choose sensor locations from the sensitivity matrix.

        :param SS: sensitivity matrix; SS.iloc[i, :] is node i's
            distance-weighted sensitivity towards every other node
        :param G: weighted network graph (accepted but not read in this method)
        :param group: {group id: node names} spatial partition of the junctions
        :return: (sensorindex, sensorindex_2) — globally ranked picks and
            per-group picks
        """
        # W = self.weight()
        n = self.n - len(self.delnodes)
        nodes = copy.deepcopy(self.nodes)
        for node in self.delnodes:
            nodes.remove(node)
        # Row sums of SS: total weighted sensitivity of every kept junction.
        sumSS = []
        for i in range(0, n):
            sumSS.append(sum(SS.iloc[i, :]))
        # Integer index used only by the (optional) CSV export frame below.
        indices = range(0, n)
        sumSS_ = pd.DataFrame(np.array(sumSS), index=indices)  # NOTE(review): unused
        # sumSS_.to_csv('sumSS_data.csv')  # dump node total sensitivities

        # Re-index by node name and sort by total sensitivity, descending.
        sumSS = pd.DataFrame(np.array(sumSS), index=nodes)
        sumSS = sumSS.sort_values(by=[0], ascending=[False])
        # Sensor nodes chosen from the global ranking.
        sensorindex = []
        # Sensor nodes chosen per group (group-local ranking).
        sensorindex_2 = []
        # Per-group sensitivity sub-matrices.
        group_S = {}
        # Per-group ranked row sums (DataFrame per group after the loop).
        group_sumSS = {}

        for i in range(0, len(group)):
            for node in self.delnodes:
                # Strip excluded nodes from each group before ranking.
                if node in group[i]:
                    group[i].remove(node)
            group_S[i] = SS.loc[group[i], group[i]]
            # Within-group sensitivity sums, sorted descending.
            group_sumSS[i] = []
            for j in range(0, len(group[i])):
                group_sumSS[i].append(sum(group_S[i].iloc[j, :]))
            group_sumSS[i] = pd.DataFrame(np.array(group_sumSS[i]), index=group[i])
            group_sumSS[i] = group_sumSS[i].sort_values(by=[0], ascending=[False])
            for node in self.less_than_min_diameter_junction_list:
                # NOTE(review): `node in group_sumSS[i]` tests DataFrame *columns*
                # (not the index) and DataFrame has no .remove(); this thin-pipe
                # filter can never take effect (and would raise if it did).
                if node in group_sumSS[i]:
                    group_sumSS[i].remove(node)
            pass

        # Selection:
        # 1. take the globally most sensitive node, then drop its whole group so
        #    no second sensor lands in the same group, and re-rank;
        # 2. independently record each group's own top node.
        sensornum = self.sensornum
        for i in range(0, sensornum):
            # Node with the highest remaining total sensitivity.
            Smaxnode = sumSS.index[0]
            sensorindex.append(Smaxnode)
            # NOTE(review): assumes group ids are exactly 0..sensornum-1.
            sensorindex_2.append(group_sumSS[i].index[0])

            for key, value in group.items():
                if Smaxnode in value:
                    sumSS = sumSS.drop(index=group[key])
                    continue

            sumSS = sumSS.sort_values(by=[0], ascending=[False])

        return sensorindex, sensorindex_2
|
||||
|
||||
|
||||
# 2025/03/13
|
||||
def get_ID(name: str, sensor_num: int, min_diameter: int) -> list[str]:
    """
    Place pressure monitoring points by the improved sensitivity method and
    return their node IDs. The initial layout is sensitivity-driven.

    :param name: database / project name (reads ./db_inp/<name>.db.inp)
    :param sensor_num: number of monitoring points
    :param min_diameter: smallest pipe diameter eligible for a sensor
    :return: node IDs of the chosen monitoring points
    """
    # EPANET input file exported from the project database (structure, nodes,
    # pipes, pumps, ...).
    inp_file_real = f'./db_inp/{name}.db.inp'
    # sensornum = sensor_num
    # Network model with the "real" roughness values.
    wn_real = wntr.network.WaterNetworkModel(inp_file_real)
    sim_real = wntr.sim.EpanetSimulator(wn_real)
    results_real = sim_real.run_sim()  # NOTE(review): unused

    real_C = wn_real.query_link_attribute('roughness').tolist()  # NOTE(review): unused
    # Hydraulic helper (distances, sensitivities) for this model.
    wn_fun1 = wn_func(wn_real, min_diameter=min_diameter)
    nodes = wn_fun1.nodes
    # Nodes excluded from placement (reservoirs, tanks, pump/valve endpoints, ...).
    delnodes = wn_fun1.delnodes
    # Node coordinates with the excluded nodes removed.
    Coor_node = getCoor(wn_real)
    Coor_node = Coor_node.drop(wn_fun1.delnodes)
    nodes = [node for node in wn_fun1.nodes if node not in delnodes]
    coordinates = wn_fun1.coordinates  # NOTE(review): unused

    # Candidate junction count (only used by the random-placement experiment below).
    junctionnum = len(nodes)
    # random_numbers = random.sample(range(junctionnum), sensor_num)
    # for i in range(sensor_num):
    #     # print(random_numbers[i])

    wn_fun1.get_Conn()
    # hL: hydraulic-distance matrix; G: weighted digraph used to build it.
    hL, G = wn_fun1.CtoS()
    # SS: distance-weighted pressure sensitivity matrix.
    SS = wn_fun1.Jaco(hL)

    G1 = wn_real.to_graph()
    G1 = G1.to_undirected()  # undirected copy (needed by the alternative partitioners)

    # Spatial partition of the junctions — one group per sensor.
    group = kgroup(Coor_node, sensor_num)
    # group = skater_partition(G1, sensor_num)
    # group = spectral_partition(G1, sensor_num)

    # print(group)
    # --------------------- export the group data ---------------------
    # Long-format frame: one row per (group, node) pair.
    # group_data = []
    # for group_id, node_list in group.items():
    #     for node in node_list:
    #         group_data.append({"Group": group_id, "Node": node})
    #
    # df_group = pd.DataFrame(group_data)
    #
    # # Save as "group.xlsx"; index=False drops the row index.
    # df_group.to_excel("group.xlsx", index=False)

    # Placement helper. Its __init__ re-runs the EPANET simulation; the
    # __dict__.update below then overwrites that fresh state with wn_fun1's
    # already-computed attributes (incidence matrices etc.).
    wn_fun = Sensorplacement(wn_real, sensor_num, min_diameter=min_diameter)
    wn_fun.__dict__.update(wn_fun1.__dict__)
    # Initial sensor layout: global ranking and per-group picks.
    sensorindex, sensorindex_2 = wn_fun.sensor(SS, G, group)
    # print(str(sensor_num), " monitoring points, located at:", sensorindex)


    # Reopen the database to fetch coordinates of the chosen nodes:
    # if is_project_open(name=name):
    #     close_project(name=name)
    # open_project(name=name)
    # for node_id in sensorindex:
    #     sensor_coord[node_id] = get_node_coord(name=name, node_id=node_id)
    # close_project(name=name)
    # print(sensor_coord)
    # # Partition plot:
    # colorlist = ['lightpink', 'coral', 'rosybrown', 'olive', 'powderblue', 'lightskyblue', 'steelblue', 'peachpuff','brown','silver','indigo','lime','gold','violet','maroon','navy','teal','magenta','cyan',
    #              'burlywood', 'tan', 'slategrey', 'thistle', 'lightseagreen', 'lightgreen', 'red','blue','yellow','orange','purple','grey','green','pink','lightblue','beige','chartreuse','turquoise','lavender','fuchsia','coral']
    # G = wn_real.to_graph()
    # G = G.to_undirected()  # undirected copy for drawing
    # pos = nx.get_node_attributes(G, 'pos')
    # pass
    #
    # for i in range(0, sensor_num):
    #     ax = plt.gca()
    #     ax.set_title(inp_file_real + str(sensor_num))
    #     nodes = nx.draw_networkx_nodes(G, pos, nodelist=group[i], node_color=colorlist[i], node_size=10)
    #     nodes = nx.draw_networkx_nodes(G, pos,
    #                                    nodelist=sensorindex_2, node_color='red', node_size=70, node_shape='*'
    #                                    )
    #     edges = nx.draw_networkx_edges(G, pos)
    #     ax.spines['top'].set_visible(False)
    #     ax.spines['right'].set_visible(False)
    #     ax.spines['bottom'].set_visible(False)
    #     ax.spines['left'].set_visible(False)
    #     plt.savefig(inp_file_real + str(sensor_num) + ".png", dpi=300)
    #     plt.show()
    #
    # wntr.graphics.plot_network(wn_real, node_attribute=sensorindex_2, node_size=50, node_labels=False,
    #                            title=inp_file_real + '_Projetion' + str(sensor_num))
    # plt.savefig(inp_file_real + '_S' + str(sensor_num) + ".png", dpi=300)
    # plt.show()
    return sensorindex
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: place 20 pressure monitoring points (pipes of at
    # least DN300 only) and show the chosen node IDs.
    chosen_nodes = get_ID(name=project_info.name, sensor_num=20, min_diameter=300)

    print(chosen_nodes)
|
||||
# --- file boundary (diff artifact): app/algorithms/api_ex/sensor_placement.py, new file, 557 lines ---
|
||||
# 改进灵敏度法
|
||||
import networkx
|
||||
import numpy as np
|
||||
import pandas
|
||||
import wntr
|
||||
import pandas as pd
|
||||
import copy
|
||||
import matplotlib.pyplot as plt
|
||||
import networkx as nx
|
||||
from sklearn.cluster import KMeans
|
||||
from wntr.epanet.toolkit import EpanetException
|
||||
from numpy.linalg import slogdet
|
||||
import random
|
||||
from app.services.tjnetwork import *
|
||||
import app.services.project_info as project_info
|
||||
|
||||
# 2025/03/12
# Step1: extract node coordinates
def getCoor(wn: "wntr.network.WaterNetworkModel") -> "pandas.DataFrame":
    """Return the (x, y) coordinates of every node in the network.

    :param wn: water network model built by wntr
    :return: DataFrame indexed by node name with float columns 'x' and 'y'
    """
    # Series: index = node names, values = coordinate tuples such as (x, y).
    site = wn.query_node_attribute('coordinates')
    # Align with wn.node_name_list explicitly instead of assuming the Series
    # is already in that order (the original zipped them positionally, which
    # silently mislabels coordinates if the orders ever diverge).
    site = site.reindex(wn.node_name_list)
    xs = [coord[0] for coord in site.values]
    ys = [coord[1] for coord in site.values]
    return pd.DataFrame({'x': xs, 'y': ys}, index=wn.node_name_list, columns=['x', 'y'])
|
||||
|
||||
|
||||
# 2025/03/12
# Step2: KMeans clustering
# Partition the nodes into knum spatial groups and store them in a dict.
def kgroup(coor: pandas.DataFrame, knum: int) -> dict[int, list[str]]:
    """Cluster node coordinates into ``knum`` spatial groups with KMeans.

    :param coor: coordinates of every node (one row per node)
    :param knum: number of clusters to form
    :return: dict mapping cluster label -> list of node names in that cluster
    """
    # fit_predict is equivalent to fit(...) followed by reading .labels_.
    labels = KMeans(n_clusters=knum).fit_predict(coor)
    return {label: coor.index[labels == label].tolist() for label in range(knum)}
|
||||
|
||||
|
||||
# 2025/03/12
# Step3: wn_func class, hydraulic computations
# wn_func computes:
#   - hydraulic length: the hydraulic resistance between nodes,
#   - sensitivity analysis: used to optimise pressure-sensor placement.
# Methods: CtoS (hydraulic distance), get_Conn (incidence matrices),
# Jaco (distance-weighted pressure sensitivity).
class wn_func(object):
    """Hydraulic analysis helpers built on top of a WNTR network model.

    A base EPANET simulation is run once at construction time; reservoirs,
    tanks, pumps, valves and the elements directly attached to them are
    recorded in ``delnodes`` / ``delpipes`` and excluded from the analysis.
    """

    # Step3.1: initialisation
    def __init__(self, wn: wntr.network.WaterNetworkModel):
        """Run the base simulation and pre-compute network bookkeeping.

        :param wn: water network model built by wntr
        """
        # Base simulation results (pressure, flowrate, head, ...).
        self.results = wntr.sim.EpanetSimulator(wn).run_sim()
        self.wn = wn
        # Pipe flowrates: rows = time steps, columns = link names.
        self.q = self.results.link['flowrate']
        # Reservoir / tank node name lists.
        ReservoirIndex = wn.reservoir_name_list
        Tankindex = wn.tank_name_list
        # All pipe / node names (self.pipes is filtered further below to
        # drop pipes attached to sources).
        self.pipes = wn.pipe_name_list
        self.nodes = wn.node_name_list
        # Node coordinates: Series of (x, y) tuples indexed by node name.
        self.coordinates = wn.query_node_attribute('coordinates')
        allpumps = wn.pump_name_list
        allvalves = wn.valve_name_list
        # Start / end nodes of every pump and valve; these junctions are
        # removed because head changes across them are not roughness-driven.
        pumpstnode = []
        pumpednode = []
        valvestnode = []
        valveednode = []
        # Pipes attached to a reservoir or tank, and the junctions at their
        # far ends.
        Reservoirpipe = []
        Reservoirednode = []
        for pump in allpumps:
            pumpstnode.append(wn.links[pump].start_node.name)
            pumpednode.append(wn.links[pump].end_node.name)
        for valve in allvalves:
            valvestnode.append(wn.links[valve].start_node.name)
            valveednode.append(wn.links[valve].end_node.name)
        for pipe in self.pipes:
            # NOTE(review): a pipe whose *end* node is a reservoir is not
            # handled here (only start->reservoir, start->tank and end->tank
            # are); presumably reservoirs only ever feed pipes -- confirm.
            if wn.links[pipe].start_node.name in ReservoirIndex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].start_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].end_node.name)
            if wn.links[pipe].end_node.name in Tankindex:
                Reservoirpipe.append(pipe)
                Reservoirednode.append(wn.links[pipe].start_node.name)
        # Nodes excluded from the analysis: reservoirs, tanks, pump/valve
        # endpoints and junctions directly adjacent to a source.
        self.delnodes = list(
            set(ReservoirIndex).union(Tankindex, pumpstnode, pumpednode, valvestnode, valveednode, Reservoirednode))
        # Links excluded from the analysis: pumps, valves and source pipes.
        self.delpipes = list(set(wn.pump_name_list).union(wn.valve_name_list).union(Reservoirpipe))
        self.pipes = [pipe for pipe in wn.pipe_name_list if pipe not in self.delpipes]
        # Lengths (metres) of the retained pipes, in self.pipes order.
        self.L = wn.query_link_attribute('length')[self.pipes].tolist()
        # Total node count (n) and retained pipe count (m).
        self.n = len(self.nodes)
        self.m = len(self.pipes)
        # Unit head loss of every link at the first time step.
        self.unit_headloss = self.results.link['headloss'].iloc[0, :].tolist()
        # Sources only (reservoirs and tanks), kept separately.
        self.delnodes1 = list(set(ReservoirIndex).union(Tankindex))

    # Step3.2: hydraulic distance
    def CtoS(self):
        """Compute the hydraulic-distance matrix between junctions.

        The distance from a control node (row index) to another node
        (column index) is the shortest weighted path along the flow
        direction, with edge weight = head loss * pipe length.

        :return: tuple ``(hydraulicL, G)`` -- the junction-to-junction
            hydraulic-distance DataFrame and the weighted flow digraph
            used to compute it.
        """
        nodes = copy.deepcopy(self.nodes)
        pipes = self.pipes
        wn = self.wn
        n = self.n
        m = self.m
        q = self.q
        L = self.L
        # Node heads transposed: rows = node names, columns = time steps.
        H1 = self.results.node['head'].T
        # Absolute head difference across every retained pipe.
        hh = []
        for p in pipes:
            h1 = self.wn.links[p].start_node.name
            h1 = H1.loc[str(h1)]
            h2 = self.wn.links[p].end_node.name
            h2 = H1.loc[str(h2)]
            hh.append(abs(h1 - h2))
        hh = np.array(hh)
        # Head-loss table: rows = time steps, columns = pipe names.
        headloss = pd.DataFrame(hh, index=pipes).T
        # hf: pairwise head loss; weightL: head loss * length (edge weights).
        hf = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        weightL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        # Build a digraph oriented along the flow direction at time step 0.
        G = nx.DiGraph()
        for i in range(0, m):
            pipe = pipes[i]
            a = wn.links[pipe].start_node.name
            b = wn.links[pipe].end_node.name
            if q.loc[0, pipe] > 0:
                hf.loc[a, b] = headloss.loc[0, pipe]
                weightL.loc[a, b] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(a, b, weightL.loc[a, b])])
            else:
                hf.loc[b, a] = headloss.loc[0, pipe]
                weightL.loc[b, a] = headloss.loc[0, pipe] * L[i]
                G.add_weighted_edges_from([(b, a, weightL.loc[b, a])])

        # Shortest weighted path length from every node to each reachable node.
        hydraulicL = pd.DataFrame(np.array([0] * (n ** 2)).reshape(n, n), index=nodes, columns=nodes, dtype=float)
        for a in nodes:
            if a in G.nodes:
                d = nx.shortest_path_length(G, source=a, weight='weight')
                for b in list(d.keys()):
                    hydraulicL.loc[a, b] = d[b]

        # Drop excluded nodes from both axes.
        hydraulicL = hydraulicL.drop(self.delnodes)
        hydraulicL = hydraulicL.drop(self.delnodes, axis=1)
        return hydraulicL, G

    # Step3.3: incidence matrices used by the sensitivity analysis
    def get_Conn(self):
        """Build node/pipe and node/node incidence matrices.

        Side effects: sets ``self.A`` (full node x pipe incidence, -1 at a
        pipe's start node and +1 at its end node), ``self.A2`` (junction x
        retained-pipe incidence as ``np.mat``), ``self.A3`` (node
        adjacency), ``self.junc_list`` and ``self.nonjunc_index``.
        """
        m = self.wn.num_links
        n = self.wn.num_nodes
        p = self.wn.num_pumps
        v = self.wn.num_valves

        # Names of non-junction nodes (reservoirs and tanks).
        self.nonjunc_index = []
        self.non_link_index = []
        for r in self.wn.reservoirs():
            self.nonjunc_index.append(r[0])
        for t in self.wn.tanks():
            self.nonjunc_index.append(t[0])
        # Conn: node x pipe incidence matrix.
        Conn = np.mat(np.zeros([n, m - p - v]))
        # NConn: node x node adjacency matrix (1 where a pipe connects them).
        NConn = np.mat(np.zeros([n, n]))
        # wn.pipes() yields only Pipe links, never pumps or valves, so no
        # extra filtering is needed (the original re-scanned the pump and
        # valve generators on every iteration to the same effect).
        pipes = list(self.wn.pipes())
        for pipe_name, pipe in pipes:
            start = self.wn.node_name_list.index(pipe.start_node_name)
            end = self.wn.node_name_list.index(pipe.end_node_name)
            # NOTE(review): assumes link_name_list orders pipes before pumps
            # and valves, so p_index < m - p - v -- confirm for this wntr
            # version.
            p_index = self.wn.link_name_list.index(pipe_name)
            Conn[start, p_index] = -1
            Conn[end, p_index] = 1
            NConn[start, end] = 1
            NConn[end, start] = 1
        self.A = Conn
        # Restrict the incidence matrix to retained junctions and pipes.
        link_name_list = [link for link in self.wn.link_name_list if
                          link not in self.wn.pump_name_list and link not in self.wn.valve_name_list]
        self.A2 = pd.DataFrame(self.A, index=self.wn.node_name_list, columns=link_name_list)
        self.A2 = self.A2.drop(self.delnodes)
        for pipe in self.delpipes:
            if pipe not in self.wn.pump_name_list and pipe not in self.wn.valve_name_list:
                self.A2 = self.A2.drop(columns=pipe)
        self.junc_list = self.A2.index
        self.A2 = np.mat(self.A2)  # junction x retained-pipe incidence
        self.A3 = NConn

    def Jaco(self, hL: pandas.DataFrame):
        """Compute the distance-weighted pressure sensitivity matrix.

        Sensitivity is the response of junction pressures to pipe
        roughness changes (Hazen-Williams); each junction's total
        sensitivity then weights its row of the hydraulic-distance matrix.

        :param hL: hydraulic-distance matrix from :meth:`CtoS`
        :return: DataFrame ``SS`` where ``SS[i, j]`` is the sensitivity of
            junction *i* times its hydraulic distance to junction *j*
        """
        # Copy so the sign flips below cannot corrupt self.A2 across calls
        # (the original negated columns of self.A2 in place).
        A = self.A2.copy()
        wn = self.wn

        # Re-run the simulation; let EpanetException propagate instead of
        # swallowing it (the original passed on the exception and then
        # crashed on the unbound name `result` in its finally block).
        result = wntr.sim.EpanetSimulator(wn).run_sim()
        h = result.link['headloss'][self.pipes].values[0]
        q = result.link['flowrate'][self.pipes].values[0]
        C = self.wn.query_link_attribute('roughness')[self.pipes]
        headloss = np.array(h)
        # Orient every pipe column along the actual flow direction.
        for i in range(0, len(q)):
            if q[i] < 0:
                A[:, i] = -A[:, i]
        q = np.abs(q)
        # Intermediate sensitivity matrices; 1.852 is the Hazen-Williams
        # exponent and the 1e-10 term guards against zero head loss.
        B = np.mat(np.diag(q / ((1.852 * headloss) + 1e-10)))
        S = np.mat(np.diag(q / C))
        # X: Jacobian of the nodal equations.
        X = A * B * A.T
        try:
            det = np.linalg.det(X)
        except np.linalg.LinAlgError:
            # det() raises LinAlgError (the original caught RuntimeError,
            # which det never raises); slogdet also avoids overflow.
            sign, logdet = slogdet(X)
            det = sign * np.exp(logdet)
        if det != 0:
            # d(head) / d(roughness)
            J_H_Cw = X.I * A * S
        else:
            # X is singular: fall back to the pseudo-inverse.
            J_H_Cw = np.linalg.pinv(X) @ A @ S

        # Per-junction total sensitivity = row sum of |J_H_Cw|.
        Sen_pressure = []
        S_pressure = np.abs(J_H_Cw).sum(axis=1).tolist()
        for ss in S_pressure:
            Sen_pressure.append(ss[0])
        # Weight each junction's row of hL by its total sensitivity:
        # SS[i, j] = sensitivity(junction i) * hydraulic distance(i -> j).
        SS = copy.deepcopy(hL)
        for i in range(0, len(Sen_pressure)):
            SS.iloc[i, :] = SS.iloc[i, :] * Sen_pressure[i]
        return SS
|
||||
|
||||
|
||||
# 2025/03/12
# Step4: sensor placement optimisation
# Sensorplacement
#   sensor: greedy selection of the sensor locations
class Sensorplacement(wn_func):
    """Select pressure-sensor locations from sensitivity results.

    Extends :class:`wn_func` with the greedy selection in :meth:`sensor`.
    """

    def __init__(self, wn: wntr.network.WaterNetworkModel, sensornum: int):
        """
        :param wn: water network model built by wntr
        :param sensornum: number of sensors to place
        """
        # Idiomatic cooperative initialisation instead of the explicit
        # base-class call wn_func.__init__(self, wn).
        super().__init__(wn)
        self.sensornum = sensornum

    # Two selection criteria:
    # 1. a node's weighted distance to all nodes,
    # 2. a node's weighted distance to all nodes within its own group.
    def sensor(self, SS: pandas.DataFrame, G: networkx.Graph, group: dict[int, list[str]]):
        """Pick sensor locations from the sensitivity matrix ``SS``.

        Side effect: writes the per-node total sensitivities to
        ``sumSS_data.csv`` in the working directory.

        :param SS: sensitivity matrix; ``SS.iloc[i, :]`` is node i's
            distance-weighted sensitivity to every other node
        :param G: weighted flow digraph of the network (currently unused,
            kept for interface compatibility)
        :param group: cluster label -> list of node names (mutated: nodes
            listed in ``self.delnodes`` are removed from each group)
        :return: tuple ``(sensorindex, sensorindex_2)`` -- globally ranked
            picks (one group eliminated per pick) and per-group best picks
        """
        n = self.n - len(self.delnodes)
        nodes = copy.deepcopy(self.nodes)
        for node in self.delnodes:
            nodes.remove(node)
        # Total sensitivity of each retained junction (row sum of SS).
        sumSS = []
        for i in range(0, n):
            sumSS.append(sum(SS.iloc[i, :]))
        # Persist the raw totals (positional index) for offline inspection.
        indices = range(0, n)
        sumSS_ = pd.DataFrame(np.array(sumSS), index=indices)
        sumSS_.to_csv('sumSS_data.csv')
        # Rank junctions by total sensitivity, descending.
        sumSS = pd.DataFrame(np.array(sumSS), index=nodes)
        sumSS = sumSS.sort_values(by=[0], ascending=[False])
        # Globally selected sensors (criterion 1).
        sensorindex = []
        # Per-group selected sensors (criterion 2).
        sensorindex_2 = []
        # Per-group sensitivity sub-matrices and ranked totals.
        group_S = {}
        group_sumSS = {}
        for i in range(0, len(group)):
            # Drop excluded nodes from the group before ranking it.
            for node in self.delnodes:
                if node in group[i]:
                    group[i].remove(node)
            group_S[i] = SS.loc[group[i], group[i]]
            group_sumSS[i] = []
            for j in range(0, len(group[i])):
                group_sumSS[i].append(sum(group_S[i].iloc[j, :]))
            group_sumSS[i] = pd.DataFrame(np.array(group_sumSS[i]), index=group[i])
            group_sumSS[i] = group_sumSS[i].sort_values(by=[0], ascending=[False])

        # Greedy selection:
        # 1. take the node with the largest sumSS, then remove its whole
        #    group from the ranking so no second pick comes from it;
        # 2. independently record each group's own top-ranked node.
        sensornum = self.sensornum
        for i in range(0, sensornum):
            # Highest-ranked remaining node.
            Smaxnode = sumSS.index[0]
            sensorindex.append(Smaxnode)
            sensorindex_2.append(group_sumSS[i].index[0])

            for key, value in group.items():
                if Smaxnode in value:
                    sumSS = sumSS.drop(index=group[key])
                    # Groups are disjoint, so at most one can match: stop
                    # scanning (the original used `continue` and kept
                    # iterating the remaining groups to no effect).
                    break

            sumSS = sumSS.sort_values(by=[0], ascending=[False])

        return sensorindex, sensorindex_2
|
||||
|
||||
|
||||
# 2025/03/13
def get_sensor_coord(name: str, sensor_num: int) -> dict[str, float]:
    """Place pressure sensors by sensitivity and return their coordinates.

    Initial sensor placement is driven by the distance-weighted
    sensitivity analysis of :class:`wn_func` / :class:`Sensorplacement`.

    :param name: project/database name; resolves ``./db_inp/<name>.db.inp``
    :param sensor_num: number of pressure sensors to place
    :return: mapping of selected node id -> coordinate record as returned
        by ``get_node_coord`` (NOTE(review): annotation says ``float`` --
        confirm the actual value type)
    """
    # EPANET input file describing the network structure (nodes, pipes,
    # pumps, ...).
    inp_file_real = f'./db_inp/{name}.db.inp'
    wn_real = wntr.network.WaterNetworkModel(inp_file_real)
    # wn_func runs the base simulation and classifies removable elements.
    # (The original also ran a second, unused EpanetSimulator simulation
    # and computed several unused locals; removed as dead work.)
    wn_fun1 = wn_func(wn_real)
    # Junction coordinates, excluding reservoirs, tanks and pump/valve-
    # adjacent nodes.
    Coor_node = getCoor(wn_real).drop(wn_fun1.delnodes)

    wn_fun1.get_Conn()
    # hL: hydraulic-distance matrix; G: weighted flow digraph.
    hL, G = wn_fun1.CtoS()
    # SS: distance-weighted sensitivity of every junction.
    SS = wn_fun1.Jaco(hL)
    # Partition the retained junctions into sensor_num spatial clusters.
    group = kgroup(Coor_node, sensor_num)
    # Transfer the state already computed on wn_fun1 so the Sensorplacement
    # instance works from the same simulation results.
    wn_fun = Sensorplacement(wn_real, sensor_num)
    wn_fun.__dict__.update(wn_fun1.__dict__)
    # Initial sensor placement.
    sensorindex, sensorindex_2 = wn_fun.sensor(SS, G, group)

    sensor_coord = {}
    # Re-open the project database so node coordinates can be queried.
    if is_project_open(name=name):
        close_project(name=name)
    open_project(name=name)
    for node_id in sensorindex:
        sensor_coord[node_id] = get_node_coord(name=name, node_id=node_id)
    close_project(name=name)
    return sensor_coord
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Smoke run: place 20 pressure sensors for the configured project and
    # print the selected node coordinates.
    # (A stale commented-out copy of get_sensor_coord's body and a
    # matplotlib cluster-plotting snippet used to live here; removed as
    # dead code.)
    sensor_coord = get_sensor_coord(name=project_info.name, sensor_num=20)
    print(sensor_coord)
||||
# --- end of diff page (web-rendering artifact) ---