import numpy as np
from tjnetwork import *
from api.s36_wda_cal import *
# from get_real_status import *
from datetime import datetime, timedelta
from math import modf
import json
import pytz
import requests
import time
import shutil
from epanet.epanet import Output
from typing import Optional, Tuple
import influxdb_api
import typing
import psycopg
import logging
import globals

# Data interfaces
# url_path = 'http://10.101.15.16:9000/loong'  # intranet
# url_path = 'http://183.64.62.100:9057/loong'  # internet
# url_real = url_path + '/api/mpoints/realValue'
# url_hist = url_path + '/api/curves/data'
#
# Device IDs of the realtime data points
# DN_900_ID = '2498'
# DN_500_ID = '3854'
# DN_1000_ID = '3853'
# H_PRESSURE = '2510'
# L_PRESSURE = '2514'
# H_TANK = '4780'
# L_TANK = '4854'
#
#
# inp file data
PATTERN_TIME_STEP = 15.0

# regions
# regions = ['hp', 'lp']
# regions_demand_patterns = {'hp': ['DN900', 'DN500'], 'lp': ['DN1000']}  # plant outflow approximates consumption
# # regions_patterns = {'hp': ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan',
# #                            '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu',
# #                            'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang',
# #                            'YongJinFu', 'BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu',
# #                            'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng', 'CaoJiaBa',
# #                            'PuLingChang', 'QiLongXiaoQu', 'TuanXiao',
# #                            'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi',
# #                            'DN500', 'DN900'],
# #                     'lp': ['PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao',
# #                            'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha',
# #                            'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan',
# #                            'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong',
# #                            'WanKeJinYuHuaFuYangFang', 'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ',
# #                            'YueLiangTian', 'YueLiangTian200', 'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu',
# #                            'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)',
# #                            'ZhangDouHua', 'JinYunXiaoQuDN400',
# #                            'DN1000']}
#
# # nodes
# monitor_single_patterns = ['ChuanYiJiXiao', 'BeiQuanHuaYuan', 'ZhuangYuanFuDi', 'JingNingJiaYuan',
#                            '308', 'JiaYinYuan', 'XinChengGuoJi', 'YiJingBeiChen', 'ZhongYangXinDu',
#                            'XinHaiJiaYuan', 'DongFengJie', 'DingYaXinYu', 'ZiYunTai', 'XieMaGuangChang',
#                            'YongJinFu', 'PanXiMingDu', 'WanKeJinYuHuaFuGaoCeng', 'KeJiXiao',
#                            'LuGouQiao', 'LongJiangHuaYuan', 'LaoQiZhongDui', 'ShiYanCun', 'TianQiDaSha',
#                            'TianShengPaiChuSuo', 'TianShengShangPin', 'JiaoTang', 'RenMinHuaYuan',
#                            'TaiJiBinJiangYiQi', 'TianQiHuaYuan', 'TaiJiBinJiangErQi', '122Zhong',
#                            'WanKeJinYuHuaFuYangFang']
# monitor_single_patterns_id = {'ChuanYiJiXiao': '7338', 'BeiQuanHuaYuan': '7315', 'ZhuangYuanFuDi': '7316',
#                               'JingNingJiaYuan': '7528', '308': '8272', 'JiaYinYuan': '7304',
#                               'XinChengGuoJi': '7325', 'YiJingBeiChen': '7328', 'ZhongYangXinDu': '7329',
#                               'XinHaiJiaYuan': '9138', 'DongFengJie': '7302', 'DingYaXinYu': '7331',
#                               'ZiYunTai': '7420,9059', 'XieMaGuangChang': '7326', 'YongJinFu': '9059',
#                               'PanXiMingDu': '7320', 'WanKeJinYuHuaFuGaoCeng': '7419',
#                               'KeJiXiao': '7305', 'LuGouQiao': '7306', 'LongJiangHuaYuan': '7318',
#                               'LaoQiZhongDui': '9075', 'ShiYanCun': '7309', 'TianQiDaSha': '7323',
#                               'TianShengPaiChuSuo': '7335', 'TianShengShangPin': '7324', 'JiaoTang': '7332',
#                               'RenMinHuaYuan': '7322', 'TaiJiBinJiangYiQi': '7333', 'TianQiHuaYuan': '8235',
#                               'TaiJiBinJiangErQi': '7334', '122Zhong': '7314', 'WanKeJinYuHuaFuYangFang': '7418'}
#
# monitor_unity_patterns = ['BianDianZhan', 'BeiNanDaDao', 'TianShengLiJie', 'XueYuanXiaoQu',
#                           'YunHuaLu', 'GaoJiaQiao', 'LuZuoFuLuXiaDuan', 'TianRunCheng',
#                           'CaoJiaBa', 'PuLingChang', 'QiLongXiaoQu', 'TuanXiao',
#                           'ChengBeiCaiShiKou', 'WenXingShe', 'YueLiangTianBBGJCZ',
#                           'YueLiangTian', 'YueLiangTian200',
#                           'ChengTaoChang', 'HuoCheZhan', 'LiangKu', 'QunXingLu',
#                           'TuanShanBaoZhongShiHua', 'XieMa', 'BeiWenQuanJiuHaoErQi', 'LaiYinHuSiQi',
#                           'JiuYuanErTongYiYuan', 'TangDouHua', 'TaiJiBinJiangErQi(SanJi)',
#                           'ZhangDouHua', '',
#                           'DN500', 'DN900', 'DN1000']
# monitor_unity_patterns_id = {'BianDianZhan': '7339', 'BeiNanDaDao': '7319', 'TianShengLiJie': '8242',
#                              'XueYuanXiaoQu': '7327', 'YunHuaLu': '7312', 'GaoJiaQiao': '7340',
#                              'LuZuoFuLuXiaDuan': '7343', 'TianRunCheng': '7310', 'CaoJiaBa': '7300',
#                              'PuLingChang': '7307', 'QiLongXiaoQu': '7321', 'TuanXiao': '8963',
#                              'ChengBeiCaiShiKou': '7330', 'WenXingShe': '7311',
#                              'YueLiangTianBBGJCZ': '7313', 'YueLiangTian': '7313', 'YueLiangTian200': '7313',
#                              'ChengTaoChang': '7301', 'HuoCheZhan': '7303',
#                              'LiangKu': '7296', 'QunXingLu': '7308',
#                              'DN500': '3854', 'DN900': '2498', 'DN1000': '3853'}
# monitor_patterns = monitor_single_patterns + monitor_unity_patterns
# monitor_patterns_id = {**monitor_single_patterns_id, **monitor_unity_patterns_id}

# flow
# hp_flow_pattern_id = {'DN900': '2498', 'DN500': '3854'}
# lp_flow_pattern_id = {'DN1000': '3853'}
#
#
# pumps
# pump_pattern_ids = ['1#', '2#', '3#', '4#', '5#', '6#', '7#']
# pumps = ['PU00000', 'PU00001', 'PU00002', 'PU00003', 'PU00004', 'PU00005', 'PU00006']
# variable_frequency_pumps = ['PU00004', 'PU00005', 'PU00006']
# fixed_pumps_id = {'PU00000': '2747', 'PU00001': '2776', 'PU00002': '2730', 'PU00003': '2787'}
# variable_pumps_id = {'PU00004': '2500', 'PU00005': '2502', 'PU00006': '2504'}
#
#
# reservoirs
# # reservoirs = ['ZBBDJSCP000002', 'R00003']
# # reservoirs_id = {'ZBBDJSCP000002': '2497', 'R00003': '2571'}
#
# tanks
# tanks = ['ZBBDTJSC000002', 'ZBBDTJSC000001']
# tanks_id = {'ZBBDTJSC000002': '4780', 'ZBBDTJSC000001': '9774'}
#
#
# IDs of the SCADA devices used for updating data
# change_data_device_ids = ['2498', '3854', '3853', '2497', '2571', '4780', '9774',
#                           '2747', '2776', '2730', '2787', '2500', '2502', '2504']

# Realtime data: associated_element_id -> api_query_id
# reservoirs_id = {}
# tanks_id = {}
# fixed_pumps_id = {}
# variable_pumps_id = {}
# pressure_id = {}
# demand_id = {}
# quality_id = {}
#
#
# Realtime data: pattern id -> api_query_id
# source_outflow_pattern_id = {}
# realtime_pipe_flow_pattern_id = {}
# pipe_flow_region_patterns = {}  # non_realtime demands partitioned by realtime pipe_flow meters
#
#
# Region (partition) queries
# source_outflow_region = {}  # values are the bound source_outflow elements
# source_outflow_region_id = {}  # values are api_query_ids
# source_outflow_region_patterns = {}  # values are associated_patterns
# # Patterns of the non-realtime data
# non_realtime_region_patterns = {}  # grouped by source_outflow_region
#
# realtime_region_pipe_flow_and_demand_id = {}  # per source_outflow_region: api_query_ids of the realtime
#                                                # pipe_flow and demand points in that region; the region flow
#                                                # minus these realtime meter flows is used later
# realtime_region_pipe_flow_and_demand_patterns = {}  # per source_outflow_region: associated_patterns of the
#                                                      # realtime pipe_flow and demand points in that region

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

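# The query helpers below all read from a single PostgreSQL table named scada_info. The table schema is not
# defined in this file; the list below is only an illustration of the columns these functions reference
# (the real table may contain more):
#     type                          -- e.g. 'reservoir_liquid_level', 'tank_liquid_level', 'fixed_pump',
#                                   --      'variable_pump', 'pressure', 'demand', 'quality',
#                                   --      'source_outflow', 'pipe_flow'
#     transmission_mode             -- 'realtime' or 'non_realtime'
#     associated_element_id         -- model element bound to the SCADA point
#     associated_pattern            -- pattern bound to the SCADA point
#     api_query_id                  -- device id used when querying InfluxDB
#     associated_pipe_flow_id       -- for non-realtime demands: the pipe-flow meter that covers them
#     associated_source_outflow_id* -- one or more columns naming the plant outflow meter(s) feeding the element
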
def query_corresponding_element_id_and_query_id(name: str) -> None:
    """
    Query the 'realtime' records of the scada_info table and build the mapping between
    associated_element_id and api_query_id.
    :param name: database name
    :return:
    """
    # connect to the database
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # records whose transmission_mode is 'realtime'
                cur.execute("""
                    SELECT type, associated_element_id, api_query_id
                    FROM scada_info
                    WHERE transmission_mode = 'realtime';
                """)
                records = cur.fetchall()
                # store each record in the dict matching its type
                for record in records:
                    type_, associated_element_id, api_query_id = record
                    if type_ == 'reservoir_liquid_level':
                        globals.reservoirs_id[associated_element_id] = api_query_id
                    elif type_ == 'tank_liquid_level':
                        globals.tanks_id[associated_element_id] = api_query_id
                    elif type_ == 'fixed_pump':
                        globals.fixed_pumps_id[associated_element_id] = api_query_id
                    elif type_ == 'variable_pump':
                        globals.variable_pumps_id[associated_element_id] = api_query_id
                    elif type_ == 'pressure':
                        globals.pressure_id[associated_element_id] = api_query_id
                    elif type_ == 'demand':
                        globals.demand_id[associated_element_id] = api_query_id
                    elif type_ == 'quality':
                        globals.quality_id[associated_element_id] = api_query_id
                    else:
                        # for undefined types, log them or ignore them
                        print(f"Unhandled type: {type_}")
    except psycopg.Error as e:
        print(f"Database connection or query error: {e}")


def query_corresponding_pattern_id_and_query_id(name: str) -> None:
    """
    Query the scada_info records whose transmission_mode is 'realtime' and whose type is
    'source_outflow' or 'pipe_flow', and store the associated_pattern -> api_query_id mapping
    in the corresponding dicts.
    :param name: database name
    :return:
    """
    # connect to the database
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # 'realtime' records whose type is 'source_outflow' or 'pipe_flow'
                cur.execute("""
                    SELECT type, associated_pattern, api_query_id
                    FROM scada_info
                    WHERE transmission_mode = 'realtime'
                      AND type IN ('source_outflow', 'pipe_flow');
                """)
                records = cur.fetchall()
                # store each record in the dict matching its type
                for record in records:
                    type_, associated_pattern, api_query_id = record
                    if type_ == 'source_outflow':
                        globals.source_outflow_pattern_id[associated_pattern] = api_query_id
                    elif type_ == 'pipe_flow':
                        globals.realtime_pipe_flow_pattern_id[associated_pattern] = api_query_id
    except psycopg.Error as e:
        print(f"Database connection or query error: {e}")


# 2025/01/11
def query_non_realtime_region(name: str) -> dict:
    """
    Query the scada_info records whose transmission_mode is 'non_realtime' and whose type is 'pipe_flow',
    collect the values of every column whose name starts with 'associated_source_outflow_id', treat each
    record's values as one region (region1, region2, ...), drop duplicate regions and store the result in
    the source_outflow_region dict.
    :param name: database name
    :return: dict mapping regions to their associated_source_outflow_id values
    """
    source_outflow_regions = []  # all regions, possibly with duplicates
    # build the connection string
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        # connect to the database
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # 'non_realtime' records whose type is 'pipe_flow'
                cur.execute("""
                    SELECT *
                    FROM scada_info
                    WHERE transmission_mode = 'non_realtime'
                      AND type = 'pipe_flow';
                """)
                records = cur.fetchall()
                col_names = [desc.name for desc in cur.description]
                # columns whose names start with 'associated_source_outflow_id'
                source_outflow_cols = [col for col in col_names if col.startswith('associated_source_outflow_id')]
                logging.info(f"Identified source_outflow columns: {source_outflow_cols}")
                for record in records:
                    # collect the non-None values of those columns
                    values = [record[col_names.index(col)] for col in source_outflow_cols
                              if record[col_names.index(col)] is not None]
                    # if the record has any values, treat them as one region
                    if values:
                        # sort so identical combinations compare equal regardless of order
                        # (drop the sort if order matters)
                        region_tuple = tuple(sorted(values))
                        source_outflow_regions.append(region_tuple)
                # remove duplicate regions
                unique_regions = []
                seen = set()
                for region in source_outflow_regions:
                    if region not in seen:
                        seen.add(region)
                        unique_regions.append(region)
                # assign a key (region1, region2, ...) to each unique region
                for idx, region in enumerate(unique_regions, 1):
                    region_key = f"region{idx}"
                    globals.source_outflow_region[region_key] = list(region)
        logging.info("Regions queried and processed successfully.")
    except psycopg.Error as e:
        logging.error(f"Database connection or query error: {e}")
    except Exception as ex:
        logging.error(f"Error while processing data: {ex}")
    return globals.source_outflow_region

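# Illustration of the grouping performed above (hypothetical element ids, not taken from a real database):
# two non-realtime pipe_flow records that reference the same pair of plant outflow meters end up in one region,
# so the result might look like
#     globals.source_outflow_region == {'region1': ['SRC_A', 'SRC_B'], 'region2': ['SRC_C']}
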
# 2025/01/18
def query_non_realtime_region_patterns(name: str, source_outflow_region: dict,
                                       column_prefix: str = 'associated_source_outflow_id') -> dict:
    """
    Group the scada_info records whose transmission_mode is 'non_realtime' by source_outflow_region and
    store each matching record's associated_pattern in the non_realtime_region_patterns dict; patterns that
    are already corrected by realtime pipe_flow meters are removed afterwards.
    :param name: database name
    :param source_outflow_region: dict mapping regions to associated_source_outflow_id values
    :param column_prefix: prefix of the columns to extract
    :return: dict mapping regions to associated_pattern lists
    """
    globals.non_realtime_region_patterns = {region: [] for region in globals.source_outflow_region.keys()}
    region_tuple_to_key = {frozenset(ids): region for region, ids in globals.source_outflow_region.items()}
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # records whose transmission_mode is 'non_realtime'
                cur.execute("""
                    SELECT *
                    FROM scada_info
                    WHERE transmission_mode = 'non_realtime'
                """)
                records = cur.fetchall()
                col_names = [desc.name for desc in cur.description]
                # columns whose names start with the given prefix
                source_outflow_cols = [col for col in col_names if col.startswith(column_prefix)]
                logging.info(f"Identified source_outflow columns: {source_outflow_cols}")
                # make sure the 'associated_pattern' column exists
                if 'associated_pattern' not in col_names:
                    logging.error("'associated_pattern' column not found in scada_info table.")
                    return globals.non_realtime_region_patterns
                # index of the 'associated_pattern' column
                pattern_idx = col_names.index('associated_pattern')
                for record in records:
                    # collect the non-None values of the associated_source_outflow_id* columns
                    values = [record[col_names.index(col)] for col in source_outflow_cols
                              if record[col_names.index(col)] is not None]
                    if values:
                        # use a frozenset so the combination can be matched against region_tuple_to_key
                        region_frozenset = frozenset(values)
                        # check whether a matching region exists
                        region_key = region_tuple_to_key.get(region_frozenset)
                        if region_key:
                            # value of 'associated_pattern'
                            associated_pattern = record[pattern_idx]
                            if associated_pattern is not None:
                                globals.non_realtime_region_patterns[region_key].append(associated_pattern)
        logging.info("regions_patterns generated successfully.")
    except psycopg.Error as e:
        logging.error(f"Database connection or query error: {e}")
    except Exception as ex:
        logging.error(f"Error while processing data: {ex}")
    # all patterns already covered by realtime pipe_flow meters
    exclude_regions = set(region for regions in globals.pipe_flow_region_patterns.values() for region in regions)
    # remove them from non_realtime_region_patterns
    for region_key, regions in globals.non_realtime_region_patterns.items():
        globals.non_realtime_region_patterns[region_key] = [region for region in regions
                                                            if region not in exclude_regions]
    return globals.non_realtime_region_patterns

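# Worked sketch of the exclusion step above (hypothetical pattern names): if region1 initially collects
# ['PAT_A', 'PAT_B', 'PAT_C'] and pipe_flow_region_patterns says some realtime meter already corrects
# ['PAT_B'], the returned dict keeps only the uncovered patterns:
#     {'region1': ['PAT_A', 'PAT_C'], ...}
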
# 2025/01/18
def query_realtime_region_pipe_flow_and_demand_id(name: str, source_outflow_region: dict,
                                                  column_prefix: str = 'associated_source_outflow_id') -> dict:
    """
    Group the scada_info records whose transmission_mode is 'realtime' and whose type is 'pipe_flow' or
    'demand' by source_outflow_region, and store each matching record's api_query_id in the
    realtime_region_pipe_flow_and_demand_id dict.
    :param name: database name
    :param source_outflow_region: dict mapping regions to associated_source_outflow_id values
    :param column_prefix: prefix of the columns to extract
    :return: dict mapping regions to api_query_id lists
    """
    globals.realtime_region_pipe_flow_and_demand_id = {region: [] for region in
                                                       globals.source_outflow_region.keys()}
    # map from frozenset(ids) to region_key
    region_tuple_to_key = {frozenset(ids): region for region, ids in globals.source_outflow_region.items()}
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # 'realtime' records whose type is 'pipe_flow' or 'demand'
                cur.execute("""
                    SELECT *
                    FROM scada_info
                    WHERE transmission_mode = 'realtime'
                      AND type IN ('pipe_flow', 'demand');
                """)
                records = cur.fetchall()
                col_names = [desc.name for desc in cur.description]
                # columns whose names start with the given prefix
                source_outflow_cols = [col for col in col_names if col.startswith(column_prefix)]
                logging.info(f"Identified source_outflow columns: {source_outflow_cols}")
                # make sure the 'api_query_id' column exists
                if 'api_query_id' not in col_names:
                    logging.error("'api_query_id' column not found in scada_info table.")
                    return globals.realtime_region_pipe_flow_and_demand_id
                # index of the 'api_query_id' column
                api_query_id_idx = col_names.index('api_query_id')
                for record in records:
                    # collect the non-None values of the associated_source_outflow_id* columns
                    values = [record[col_names.index(col)] for col in source_outflow_cols
                              if record[col_names.index(col)] is not None]
                    if values:
                        # use a frozenset so the combination can be matched against region_tuple_to_key
                        region_frozenset = frozenset(values)
                        # check whether a matching region exists
                        region_key = region_tuple_to_key.get(region_frozenset)
                        if region_key:
                            # value of 'api_query_id'
                            api_query_id = record[api_query_id_idx]
                            if api_query_id is not None:
                                globals.realtime_region_pipe_flow_and_demand_id[region_key].append(api_query_id)
        logging.info("realtime_region_pipe_flow_and_demand_id generated successfully.")
    except psycopg.Error as e:
        logging.error(f"Database connection or query error: {e}")
    except Exception as ex:
        logging.error(f"Error while processing data: {ex}")
    return globals.realtime_region_pipe_flow_and_demand_id


# 2025/01/17
def query_pipe_flow_region_patterns(name: str, column_prefix: str = 'associated_pipe_flow_id') -> dict:
    """
    Query the scada_info records whose type is 'demand' and whose transmission_mode is 'non_realtime' and
    note each record's associated_pattern. If the record has an associated_pipe_flow_id, and the record
    found via that associated_pipe_flow_id (matched on associated_element_id) has transmission_mode
    'realtime', the demand record's associated_pattern is stored in the dict under the pipe_flow record's
    associated_pattern, e.g. {'<pipe_flow pattern>': ['P17021', 'ZBBGXSZW000377'], '<pipe_flow pattern>': ['P16504']}.
    :param name: database name
    :param column_prefix: prefix of the columns to extract
    :return: the pipe_flow_region_patterns dict
    """
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # 'non_realtime' records whose type is 'demand'
                cur.execute("""
                    SELECT associated_pattern, associated_pipe_flow_id
                    FROM scada_info
                    WHERE type = 'demand'
                      AND transmission_mode = 'non_realtime';
                """)
                records = cur.fetchall()
                col_names = [desc.name for desc in cur.description]
                # column indexes
                pattern_idx = col_names.index('associated_pattern')
                pipe_flow_id_idx = col_names.index('associated_pipe_flow_id')
                for record in records:
                    associated_pattern = record[pattern_idx]
                    associated_pipe_flow_id = record[pipe_flow_id_idx]
                    if associated_pipe_flow_id:
                        # look up the record bound to this associated_pipe_flow_id
                        cur.execute("""
                            SELECT associated_pattern, transmission_mode
                            FROM scada_info
                            WHERE associated_element_id = %s;
                        """, (associated_pipe_flow_id,))
                        pipe_flow_record = cur.fetchone()
                        if pipe_flow_record:
                            pipe_flow_associated_pattern = pipe_flow_record[0]
                            transmission_mode = pipe_flow_record[1]
                            if transmission_mode == 'realtime':
                                # store the demand pattern under the meter's pattern
                                if pipe_flow_associated_pattern not in globals.pipe_flow_region_patterns:
                                    globals.pipe_flow_region_patterns[pipe_flow_associated_pattern] = []
                                globals.pipe_flow_region_patterns[pipe_flow_associated_pattern].append(associated_pattern)
        logging.info("pipe_flow_region_patterns generated successfully.")
    except psycopg.Error as e:
        logging.error(f"Database connection or query error: {e}")
    except Exception as ex:
        logging.error(f"Error while processing data: {ex}")
    return globals.pipe_flow_region_patterns

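# Worked sketch of the mapping built above (names hypothetical except 'P17021', taken from the docstring
# example): a non-realtime demand whose pattern is 'P17021' and whose associated_pipe_flow_id points to a
# meter record with transmission_mode 'realtime' and associated_pattern 'PF_PAT_1' ends up as
#     globals.pipe_flow_region_patterns == {'PF_PAT_1': ['P17021'], ...}
# i.e. the keys are the realtime meters' patterns and the values are the demand patterns they cover.
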
# 2025/01/11
def get_source_outflow_region_id(name: str, source_outflow_region: dict,
                                 column_prefix: str = 'associated_source_outflow_id') -> dict:
    """
    Based on source_outflow_region, replace each associated_source_outflow_id with its api_query_id and
    build the new dict source_outflow_region_id.
    :param name: database name
    :param source_outflow_region: dict mapping regions to associated_source_outflow_id values
    :param column_prefix: prefix of the columns to extract
    :return: dict mapping regions to api_query_id lists
    """
    globals.source_outflow_region_id = {region: [] for region in globals.source_outflow_region.keys()}
    # all unique associated_source_outflow_id values
    all_ids = set()
    for ids in globals.source_outflow_region.values():
        all_ids.update(ids)
    if not all_ids:
        logging.warning("No associated_source_outflow_id found in source_outflow_region.")
        return globals.source_outflow_region_id
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # query associated_element_id and api_query_id
                query = """
                    SELECT associated_element_id, api_query_id
                    FROM scada_info
                    WHERE associated_element_id = ANY(%s)
                """
                cur.execute(query, (list(all_ids),))
                rows = cur.fetchall()
                # build the associated_source_outflow_id -> api_query_id mapping
                id_to_api_query_id = {}
                for row in rows:
                    associated_id = row[0]
                    api_query_id = row[1]
                    if associated_id in all_ids and api_query_id is not None:
                        id_to_api_query_id[associated_id] = str(api_query_id)
                # replace the ids in source_outflow_region with api_query_ids
                for region, ids in globals.source_outflow_region.items():
                    for id_ in ids:
                        api_id = id_to_api_query_id.get(id_)
                        if api_id:
                            globals.source_outflow_region_id[region].append(api_id)
                        else:
                            logging.warning(f"No api_query_id found for associated_source_outflow_id: {id_}")
    except psycopg.Error as e:
        logging.error(f"Database connection or query error: {e}")
    except Exception as ex:
        logging.error(f"Error while processing data: {ex}")
    return globals.source_outflow_region_id

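# Illustration of the replacement above (hypothetical values): with
#     globals.source_outflow_region == {'region1': ['SRC_A', 'SRC_B']}
# and scada_info mapping SRC_A to api_query_id '1001' and SRC_B to '1002', the result is
#     globals.source_outflow_region_id == {'region1': ['1001', '1002']}
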
# 2025/01/18
def get_realtime_region_patterns(name: str, source_outflow_region_id: dict,
                                 realtime_region_pipe_flow_and_demand_id: dict) -> Tuple[dict, dict]:
    """
    For every region, look up the associated_pattern of each api_query_id in scada_info and store the
    results in source_outflow_region_patterns and realtime_region_pipe_flow_and_demand_patterns.
    :param name: database name
    :param source_outflow_region_id: dict mapping regions to api_query_id lists
    :param realtime_region_pipe_flow_and_demand_id: dict mapping regions to api_query_id lists
    :return: the source_outflow_region_patterns and realtime_region_pipe_flow_and_demand_patterns dicts
    """
    # initialise the dicts that will be returned
    globals.source_outflow_region_patterns = {region: [] for region in globals.source_outflow_region_id.keys()}
    globals.realtime_region_pipe_flow_and_demand_patterns = {region: [] for region in
                                                             globals.realtime_region_pipe_flow_and_demand_id.keys()}
    conn_string = f"dbname={name} host=127.0.0.1"
    try:
        with psycopg.connect(conn_string) as conn:
            with conn.cursor() as cur:
                # iterate over each region
                for region in globals.source_outflow_region_id.keys():
                    # look up the associated_pattern for each api_query_id in source_outflow_region_id
                    source_outflow_api_ids = globals.source_outflow_region_id[region]
                    if source_outflow_api_ids:
                        api_query_ids_str = ", ".join([f"'{api_id}'" for api_id in source_outflow_api_ids])
                        cur.execute(f"""
                            SELECT api_query_id, associated_pattern
                            FROM scada_info
                            WHERE api_query_id IN ({api_query_ids_str});
                        """)
                        results = cur.fetchall()
                        globals.source_outflow_region_patterns[region] = [
                            associated_pattern for _, associated_pattern in results if associated_pattern
                        ]
                    # look up the associated_pattern for each api_query_id in realtime_region_pipe_flow_and_demand_id
                    realtime_api_ids = globals.realtime_region_pipe_flow_and_demand_id[region]
                    if realtime_api_ids:
                        api_query_ids_str = ", ".join([f"'{api_id}'" for api_id in realtime_api_ids])
                        cur.execute(f"""
                            SELECT api_query_id, associated_pattern
                            FROM scada_info
                            WHERE api_query_id IN ({api_query_ids_str});
                        """)
                        results = cur.fetchall()
                        globals.realtime_region_pipe_flow_and_demand_patterns[region] = [
                            associated_pattern for _, associated_pattern in results if associated_pattern
                        ]
        logging.info("source_outflow_region_patterns and realtime_region_pipe_flow_and_demand_patterns "
                     "generated successfully.")
    except psycopg.Error as e:
        logging.error(f"Database connection or query error: {e}")
    except Exception as ex:
        logging.error(f"Error while processing data: {ex}")
    return globals.source_outflow_region_patterns, globals.realtime_region_pipe_flow_and_demand_patterns


def get_pattern_index(cur_datetime: str) -> int:
    """
    Compute the pattern index corresponding to the given datetime string.
    :param cur_datetime: str, current datetime in "YYYY-MM-DD HH:MM:SS" format
    :return: int, index based on the predefined time step PATTERN_TIME_STEP
    """
    str_format = "%Y-%m-%d %H:%M:%S"
    dt = datetime.strptime(cur_datetime, str_format)
    hr = dt.hour
    mnt = dt.minute
    i = int((hr * 60 + mnt) / PATTERN_TIME_STEP)
    return i


def get_pattern_index_str(current_time: str) -> str:
    """
    Get the pattern index for the current time and format it as an "HH:MM:00" string.
    :param current_time: str, current time in "YYYY-MM-DD HH:MM:SS" format
    :return: str in "HH:MM:00" format
    """
    i = get_pattern_index(current_time)
    [minN, hrN] = modf(i * PATTERN_TIME_STEP / 60)
    minN_str = str(int(minN * 60)).zfill(2)
    hrN_str = str(int(hrN)).zfill(2)
    str_i = '{}:{}:00'.format(hrN_str, minN_str)
    return str_i


def from_seconds_to_clock(secs: int) -> str:
    """
    Format a number of seconds as an "HH:MM:SS" string.
    :param secs: int, seconds
    :return: str in "HH:MM:SS" format
    """
    hrs = int(secs / 3600)
    minutes = int((secs - hrs * 3600) / 60)
    seconds = secs - hrs * 3600 - minutes * 60
    hrs_str = str(hrs).zfill(2)
    minutes_str = str(minutes).zfill(2)
    seconds_str = str(seconds).zfill(2)
    str_clock = '{}:{}:{}'.format(hrs_str, minutes_str, seconds_str)
    return str_clock


def convert_time_format(original_time: str) -> str:
    """
    Convert a "2024-04-13T08:00:00+08:00" style timestamp to "2024-04-13 08:00:00".
    :param original_time: str, time in "2024-04-13T08:00:00+08:00" format
    :return: str, time in "2024-04-13 08:00:00" format
    """
    new_time = original_time.replace('T', ' ')
    new_time = new_time.replace('+08:00', '')
    return new_time


def get_history_pattern_info(project_name, pattern_name):
    """Read the saved historical flow values of the selected pattern."""
    flow_list = []
    patterns_info = read_all(project_name,
                             f"select * from history_patterns_flows where id = '{pattern_name}' order by _order")
    for item in patterns_info:
        flow_list.append(float(item['flow']))
    return flow_list

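# Worked examples for the time-formatting helpers above, with PATTERN_TIME_STEP = 15 minutes
# (doctest-style comments; the values can be checked by hand):
#     >>> get_pattern_index('2025-02-08 10:30:00')    # (10 h * 60 + 30 min) / 15 min
#     42
#     >>> get_pattern_index_str('2025-02-08 10:30:00')
#     '10:30:00'
#     >>> from_seconds_to_clock(5400)                 # 5400 s = 1 h 30 min
#     '01:30:00'
#     >>> convert_time_format('2024-04-13T08:00:00+08:00')
#     '2024-04-13 08:00:00'
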
# 2025/01/11
def run_simulation(name: str, simulation_type: str, modify_pattern_start_time: str, modify_total_duration: int = 0,
                   modify_reservoir_head_pattern: dict[str, list] = None,
                   modify_tank_initial_level: dict[str, float] = None,
                   modify_junction_base_demand: dict[str, float] = None,
                   modify_junction_demand_pattern: dict[str, list] = None,
                   modify_pump_pattern: dict[str, list] = None):
    """
    Apply the given modifications to the corresponding entries in the project database, run the simulation
    and return the results.
    :param name: model name, i.e. the project name in the database
    :param simulation_type: 'realtime' for a realtime simulation that modifies the original database;
                            'extended' for a multi-step simulation that works on a copy of the database
    :param modify_pattern_start_time: simulation start time, e.g. '2024-11-25T09:00:00+08:00'
    :param modify_total_duration: total simulation duration in seconds
    :param modify_reservoir_head_pattern: dict of reservoir head patterns; key is the head_pattern id,
                                          value is the new head_pattern
    :param modify_tank_initial_level: dict of tanks; key is the tank id, value is the new initial_level
    :param modify_junction_base_demand: dict of junctions; key is the junction id, value is the new base_demand
    :param modify_junction_demand_pattern: dict of junction demand patterns; key is the demand_pattern id,
                                           value is the new demand_pattern
    :param modify_pump_pattern: dict of pump patterns; key is the pump pattern id, value is the new pattern
    :return:
    """
    # record the start time
    time_cost_start = time.perf_counter()
    print('{} -- Hydraulic simulation started.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')))

    # reopen the project database
    if is_project_open(name):
        close_project(name)
    # realtime simulation or extended (multi-step) simulation
    if simulation_type.upper() == 'REALTIME':
        # realtime simulation (modify the original database)
        name_c = name
    elif simulation_type.upper() == 'EXTENDED':
        # extended simulation (work on a copy of the database)
        name_c = '_'.join([name, 'c'])
        if have_project(name_c):
            if is_project_open(name_c):
                close_project(name_c)
            delete_project(name_c)
        copy_project(name, name_c)  # back up the project
    else:
        raise Exception('Incorrect simulation type, choose in (realtime, extended)')
    # open the project database
    open_project(name_c)

    # normalise the input time parameter
    pattern_start_time = convert_time_format(modify_pattern_start_time)
    # pattern index corresponding to the simulation start time
    modify_index = get_pattern_index(pattern_start_time)

    # iterate over the pump pattern ids and update the patterns from the given pump_pattern
    # for pump_pattern_id in pump_pattern_ids:
    #     # only continue if the first frequency value for this pump_pattern_id is a valid number
    #     # (not empty, not NaN)
    #     if not np.isnan(modify_pump_pattern[pump_pattern_id][0]):
    #         # fetch the pattern from the database
    #         pump_pattern = get_pattern(name_c, get_pump(name_c, pump_pattern_id)['pattern'])
    #         # replace the stored pattern values with modify_pump_pattern
    #         pump_pattern['factors'][modify_index: modify_index + len(modify_pump_pattern[pump_pattern_id])] \
    #             = modify_pump_pattern[pump_pattern_id]
    #         cs = ChangeSet()
    #         cs.append(pump_pattern)
    #         set_pattern(name_c, cs)

    # set the simulation start time and duration
    str_pattern_start = get_pattern_index_str(convert_time_format(modify_pattern_start_time))
    dic_time = get_time(name_c)
    dic_time['PATTERN START'] = str_pattern_start
    dic_time['DURATION'] = from_seconds_to_clock(modify_total_duration)
    cs = ChangeSet()
    cs.operations.append(dic_time)
    set_time(name_c, cs)

    if globals.reservoirs_id:
        # e.g. reservoirs_id = {'ZBBDJSCP000002': '2497', 'R00003': '2571'}
        # 1. fetch the reservoir SCADA data, e.g. {'2497': '3.1231', '2571': '2.7387'}
        reservoir_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.reservoirs_id.values()), query_time=modify_pattern_start_time)
        # 2. re-key by element id, e.g. {'ZBBDJSCP000002': '3.1231', 'R00003': '2.7387'}
        reservoir_dict = {key: reservoir_SCADA_data_dict[value] for key, value in globals.reservoirs_id.items()}
        # 3. update the reservoir head patterns
        for reservoir_name, value in reservoir_dict.items():
            if value and float(value) != 0:
                # get the pattern bound to the reservoir, then modify it
                reservoir_pattern = get_pattern(name_c, get_reservoir(name_c, reservoir_name)['pattern'])
                reservoir_pattern['factors'][modify_index] = float(value) + globals.RESERVOIR_BASIC_HEIGHT
                cs = ChangeSet()
                cs.append(reservoir_pattern)
                set_pattern(name_c, cs)

    if globals.tanks_id:
        # update the tanks' initial levels
        tank_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.tanks_id.values()), query_time=modify_pattern_start_time)
        tank_dict = {key: tank_SCADA_data_dict[value] for key, value in globals.tanks_id.items()}
        for tank_name, value in tank_dict.items():
            if value and float(value) != 0:
                tank = get_tank(name_c, tank_name)
                tank['init_level'] = float(value)
                cs = ChangeSet()
                cs.append(tank)
                set_tank(name_c, cs)

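    # The two pump blocks below write the live SCADA reading into a single slot of each pump's speed pattern
    # (pattern['factors'][modify_index]). Illustration with hypothetical readings: a fixed-speed pump reported
    # as 1.0 (running) keeps factor 1.0, while a variable-frequency pump reported at 47.5 Hz is stored as
    # 47.5 / 50 = 0.95, i.e. the drive frequency divided by an assumed 50 Hz nominal frequency.
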
    if globals.fixed_pumps_id:
        # update the fixed-speed pump patterns
        fixed_pump_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.fixed_pumps_id.values()), query_time=modify_pattern_start_time)
        fixed_pump_dict = {key: fixed_pump_SCADA_data_dict[value] for key, value in globals.fixed_pumps_id.items()}
        for fixed_pump_name, value in fixed_pump_dict.items():
            if value:
                pump_pattern = get_pattern(name_c, get_pump(name_c, fixed_pump_name)['pattern'])
                pump_pattern['factors'][modify_index] = float(value)
                cs = ChangeSet()
                cs.append(pump_pattern)
                set_pattern(name_c, cs)

    if globals.variable_pumps_id:
        # update the variable-frequency pump patterns
        variable_pump_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.variable_pumps_id.values()), query_time=modify_pattern_start_time)
        # print(variable_pump_SCADA_data_dict)
        variable_pump_dict = {key: variable_pump_SCADA_data_dict[value]
                              for key, value in globals.variable_pumps_id.items()}
        # print(variable_pump_dict)
        for variable_pump_name, value in variable_pump_dict.items():
            if value:
                pump_pattern = get_pattern(name_c, get_pump(name_c, variable_pump_name)['pattern'])
                pump_pattern['factors'][modify_index] = float(value) / 50
                cs = ChangeSet()
                cs.append(pump_pattern)
                set_pattern(name_c, cs)

    if globals.demand_id:
        # update the patterns of large consumers from realtime data
        demand_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.demand_id.values()), query_time=modify_pattern_start_time)
        demand_dict = {key: demand_SCADA_data_dict[value] for key, value in globals.demand_id.items()}
        for demand_name, value in demand_dict.items():
            if value:
                demand_pattern = get_pattern(name_c, get_demand(name_c, demand_name)['pattern'])
                if get_option(name_c)['UNITS'] == 'LPS':
                    demand_pattern['factors'][modify_index] = float(value) / 3.6  # m3/h -> L/s
                elif get_option(name_c)['UNITS'] == 'CMH':
                    demand_pattern['factors'][modify_index] = float(value)
                cs = ChangeSet()
                cs.append(demand_pattern)
                set_pattern(name_c, cs)

    # How realtime quality and pressure data are used is still to be added #############################

    if globals.source_outflow_pattern_id:
        # scale the patterns bound to the plant outflow meters using their realtime readings
        source_outflow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.source_outflow_pattern_id.values()), query_time=modify_pattern_start_time)
        # print(source_outflow_SCADA_data_dict)
        source_outflow_dict = {key: source_outflow_SCADA_data_dict[value]
                               for key, value in globals.source_outflow_pattern_id.items()}
        # print(source_outflow_dict)
        for pattern_name in source_outflow_dict.keys():
            # print(pattern_name)
            history_source_outflow_list = get_history_pattern_info(name_c, pattern_name)
            history_source_outflow = history_source_outflow_list[modify_index]
            # print(source_outflow_dict[pattern_name])
            if source_outflow_dict[pattern_name]:
                realtime_source_outflow = float(source_outflow_dict[pattern_name])
                multiply_factor = realtime_source_outflow / history_source_outflow
                pattern = get_pattern(name_c, pattern_name)
                pattern['factors'][modify_index] *= multiply_factor
                cs = ChangeSet()
                cs.append(pattern)
                set_pattern(name_c, cs)

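    # The same ratio-based correction is applied below to the realtime pipe-flow meters. Hypothetical example:
    # if the stored historical value at modify_index corresponds to 1000 m3/h and the meter currently reports
    # 950 m3/h, the multiplier is 950 / 1000 = 0.95 and the pattern factor at modify_index is scaled by 0.95
    # (the units cancel, so the factor itself stays dimensionless).
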
    if globals.realtime_pipe_flow_pattern_id:
        # scale the patterns bound to realtime pipe_flow meters using their realtime readings
        realtime_pipe_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(globals.realtime_pipe_flow_pattern_id.values()), query_time=modify_pattern_start_time)
        realtime_pipe_flow_dict = {key: realtime_pipe_flow_SCADA_data_dict[value]
                                   for key, value in globals.realtime_pipe_flow_pattern_id.items()}
        for pattern_name in realtime_pipe_flow_dict.keys():
            history_pipe_flow_list = get_history_pattern_info(name_c, pattern_name)
            history_pipe_flow = history_pipe_flow_list[modify_index]
            if realtime_pipe_flow_dict[pattern_name]:
                realtime_pipe_flow = float(realtime_pipe_flow_dict[pattern_name])
                multiply_factor = realtime_pipe_flow / history_pipe_flow
                pattern = get_pattern(name_c, pattern_name)
                pattern['factors'][modify_index] *= multiply_factor
                cs = ChangeSet()
                cs.append(pattern)
                set_pattern(name_c, cs)

    if globals.pipe_flow_region_patterns:
        # scale the patterns of non_realtime demands covered by a realtime pipe_flow meter,
        # using that meter's realtime reading
        temp_realtime_pipe_flow_pattern_id = {}
        # iterate over the keys of pipe_flow_region_patterns
        for pipe_flow_region, demand_patterns in globals.pipe_flow_region_patterns.items():
            # api_query_id of the corresponding realtime meter
            query_api_id = globals.realtime_pipe_flow_pattern_id.get(pipe_flow_region)
            temp_realtime_pipe_flow_pattern_id[pipe_flow_region] = query_api_id
        temp_realtime_pipe_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
            query_ids_list=list(temp_realtime_pipe_flow_pattern_id.values()), query_time=modify_pattern_start_time)
        temp_realtime_pipe_flow_dict = {key: temp_realtime_pipe_flow_SCADA_data_dict[value]
                                        for key, value in temp_realtime_pipe_flow_pattern_id.items()}
        for pattern_name in temp_realtime_pipe_flow_dict.keys():
            temp_history_pipe_flow_list = get_history_pattern_info(name_c, pattern_name)
            temp_history_pipe_flow = temp_history_pipe_flow_list[modify_index]
            if temp_realtime_pipe_flow_dict[pattern_name]:
                temp_realtime_pipe_flow = float(temp_realtime_pipe_flow_dict[pattern_name])
                temp_multiply_factor = temp_realtime_pipe_flow / temp_history_pipe_flow
                temp_non_realtime_demand_pattern_list = globals.pipe_flow_region_patterns[pattern_name]
                for demand_pattern_name in temp_non_realtime_demand_pattern_list:
                    pattern = get_pattern(name_c, demand_pattern_name)
                    pattern['factors'][modify_index] *= temp_multiply_factor
                    cs = ChangeSet()
                    cs.append(pattern)
                    set_pattern(name_c, cs)

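    # The block below distributes the remaining, unmetered water within each region. Hypothetical numbers:
    # if a region's plant outflow meters currently read 5000 m3/h, its realtime pipe-flow/demand meters read
    # 1200 m3/h, and the corresponding historical values at modify_index are 4800 and 1200 m3/h, then every
    # non-realtime demand pattern in the region is scaled by (5000 - 1200) / (4800 - 1200) = 3800 / 3600,
    # i.e. roughly 1.06: the ratio of the current to the historical unmetered flow.
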
    if globals.source_outflow_region:
        # regions defined by associated_source_outflow_id; each region is updated with
        # (plant outflow meters - realtime pipe_flow and demand meters)
        for region in globals.source_outflow_region.keys():
            temp_source_outflow_region_id = globals.source_outflow_region_id.get(region, [])
            temp_realtime_region_pipe_flow_and_demand_id = globals.realtime_region_pipe_flow_and_demand_id.get(region, [])
            temp_source_outflow_region_patterns = globals.source_outflow_region_patterns.get(region, [])
            temp_realtime_region_pipe_flow_and_demand_patterns = globals.realtime_region_pipe_flow_and_demand_patterns.get(region, [])
            temp_non_realtime_region_patterns = globals.non_realtime_region_patterns.get(region, [])
            region_source_outflow_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
                query_ids_list=temp_source_outflow_region_id, query_time=modify_pattern_start_time)
            region_realtime_region_pipe_flow_and_demand_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
                query_ids_list=temp_realtime_region_pipe_flow_and_demand_id, query_time=modify_pattern_start_time)
            region_total_source_outflow = sum(float(value) for value in region_source_outflow_data_dict.values())
            history_region_total_source_outflow = 0
            for source_outflow_pattern_name in temp_source_outflow_region_patterns:
                temp_history_source_outflow_list = get_history_pattern_info(name_c, source_outflow_pattern_name)
                history_region_total_source_outflow += temp_history_source_outflow_list[modify_index]
            region_total_realtime_region_pipe_flow_and_demand = sum(
                float(value) for value in region_realtime_region_pipe_flow_and_demand_data_dict.values())
            history_region_total_realtime_region_pipe_flow_and_demand = 0
            for pipe_flow_and_demand_pattern_name in temp_realtime_region_pipe_flow_and_demand_patterns:
                temp_history_pipe_flow_and_demand_list = get_history_pattern_info(name_c, pipe_flow_and_demand_pattern_name)
                history_region_total_realtime_region_pipe_flow_and_demand += temp_history_pipe_flow_and_demand_list[modify_index]
            if (region_total_source_outflow - region_total_realtime_region_pipe_flow_and_demand):
                temp_multiply_factor = (region_total_source_outflow - region_total_realtime_region_pipe_flow_and_demand) / (
                    history_region_total_source_outflow - history_region_total_realtime_region_pipe_flow_and_demand)
                for non_realtime_region_pattern_name in temp_non_realtime_region_patterns:
                    pattern = get_pattern(name_c, non_realtime_region_pattern_name)
                    pattern['factors'][modify_index] *= temp_multiply_factor
                    cs = ChangeSet()
                    cs.append(pattern)
                    set_pattern(name_c, cs)

    # # scale the high-pressure demand patterns according to the high-pressure plant outflow
    # hp_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
    #     query_ids_list=list(hp_flow_pattern_id.values()), query_time=modify_pattern_start_time)
    # hp_flow_dict = {key: hp_flow_SCADA_data_dict[value] for key, value in hp_flow_pattern_id.items()}
    # all_valid = all(value and float(value) != 0 for value in hp_flow_dict.values())
    # if all_valid:
    #     hp_total_SCADA_flow = sum(float(value) for value in hp_flow_dict.values())
    #     hp_total_history_flow = 0
    #     for pattern_name in hp_flow_dict.keys():
    #         history_flow_list = get_history_pattern_info(name_c, pattern_name)
    #         hp_total_history_flow += history_flow_list[modify_index]
    #     multiply_factor1 = hp_total_SCADA_flow / hp_total_history_flow
    #     hp_pattern_list = regions_patterns['hp']
    #     for pattern_name in hp_pattern_list:
    #         pattern = get_pattern(name_c, pattern_name)
    #         pattern['factors'][modify_index] *= multiply_factor1
    #         cs = ChangeSet()
    #         cs.append(pattern)
    #         set_pattern(name_c, cs)
    #
    # # scale the low-pressure demand patterns according to the low-pressure plant outflow
    # lp_flow_SCADA_data_dict = influxdb_api.query_SCADA_data_by_device_ID_and_time(
    #     query_ids_list=list(lp_flow_pattern_id.values()), query_time=modify_pattern_start_time)
    # lp_flow_dict = {key: lp_flow_SCADA_data_dict[value] for key, value in lp_flow_pattern_id.items()}
    # all_valid2 = all(value and float(value) != 0 for value in lp_flow_dict.values())
    # if all_valid2:
    #     lp_total_SCADA_flow = sum(float(value) for value in lp_flow_dict.values())
    #     lp_total_history_flow = 0
    #     for pattern_name in lp_flow_dict.keys():
    #         history_flow_list = get_history_pattern_info(name_c, pattern_name)
    #         lp_total_history_flow += history_flow_list[modify_index]
    #     multiply_factor2 = lp_total_SCADA_flow / lp_total_history_flow
    #     lp_pattern_list = regions_patterns['lp']
    #     for pattern_name in lp_pattern_list:
    #         pattern = get_pattern(name_c, pattern_name)
    #         pattern['factors'][modify_index] *= multiply_factor2
    #         cs = ChangeSet()
    #         cs.append(pattern)
    #         set_pattern(name_c, cs)

    # run the simulation and collect the results
    print('Before run_project')
    result = run_project(name_c)
    # print(f'Simulation result : ' + result)
    time_cost_end = time.perf_counter()
    print('{} -- Hydraulic simulation finished, cost time: {:.2f} s.'.format(
        datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'), time_cost_end - time_cost_start))
    close_project(name_c)
    time.sleep(5)  # wait 5 seconds

    tmp_file = './temp/simulation.result.out'
    shutil.copy(f'./temp/{name_c}.db.opt', tmp_file)
    output = Output(tmp_file)
    node_result = output.node_results()
    link_result = output.link_results()
    # print(link_result[:3])
    influxdb_api.store_realtime_simulation_result_to_influxdb(node_result, link_result, modify_pattern_start_time)


if __name__ == "__main__":
    # Before the simulation, read the scada_info table and update the PostgreSQL database as configured.
    query_corresponding_element_id_and_query_id("bb")
    query_corresponding_pattern_id_and_query_id('bb')
    region_result = query_non_realtime_region('bb')
    print(region_result)
    globals.source_outflow_region_id = get_source_outflow_region_id('bb', region_result)
    globals.realtime_region_pipe_flow_and_demand_id = query_realtime_region_pipe_flow_and_demand_id('bb', region_result)
    globals.pipe_flow_region_patterns = query_pipe_flow_region_patterns('bb')
    globals.non_realtime_region_patterns = query_non_realtime_region_patterns('bb', region_result)
    globals.source_outflow_region_patterns, globals.realtime_region_pipe_flow_and_demand_patterns = \
        get_realtime_region_patterns('bb', globals.source_outflow_region_id,
                                     globals.realtime_region_pipe_flow_and_demand_id)

    # print the dicts to verify their contents
    # print("Reservoirs ID:", globals.reservoirs_id)
    # print("Tanks ID:", globals.tanks_id)
    # print("Fixed Pumps ID:", globals.fixed_pumps_id)
    # print("Variable Pumps ID:", globals.variable_pumps_id)
    # print("Pressure ID:", globals.pressure_id)
    # print("Demand ID:", globals.demand_id)
    # print("Quality ID:", globals.quality_id)
    # print("Source Outflow Pattern ID:", globals.source_outflow_pattern_id)
    # print("Realtime Pipe Flow Pattern ID:", globals.realtime_pipe_flow_pattern_id)
    # print("Pipe Flow Region Patterns:", globals.pipe_flow_region_patterns)
    # print("Source Outflow Region:", region_result)
    # print('Source Outflow Region ID:', globals.source_outflow_region_id)
    # print('Source Outflow Region Patterns:', globals.source_outflow_region_patterns)
    # print("Non Realtime Region Patterns:", globals.non_realtime_region_patterns)
    # print("Realtime Region Pipe Flow And Demand ID:", globals.realtime_region_pipe_flow_and_demand_id)
    # print("Realtime Region Pipe Flow And Demand Patterns:", globals.realtime_region_pipe_flow_and_demand_patterns)

    run_simulation(name='bb', simulation_type="realtime", modify_pattern_start_time='2025-02-08T10:30:00+08:00')