# predict.py
import os
from datetime import datetime, timedelta

import joblib
import numpy as np
import pandas as pd
import torch

from gat_lstm import GAT_LSTM
class RealTimePredictor:
    """Serve multi-step forecasts from a trained GAT-LSTM model.

    Loads the trained weights and the scaler fitted during training, then
    turns a DataFrame of recent sensor readings into a 5-step-ahead
    prediction of 4 target variables, with a history-based fallback when
    the model emits NaN/inf.
    """

    def __init__(self, model_path: str = 'model.pth', scaler_path: str = 'scaler.pkl', device=None):
        """
        Initialize the predictor.

        Args:
            model_path: path to the trained model weights (state_dict file).
            scaler_path: path to the scaler fitted at training time. It must
                expose ``transform``, ``min_`` and ``scale_`` — i.e. an
                sklearn MinMaxScaler or compatible (assumption; confirm
                against the training code).
            device: optional torch.device; defaults to cuda:0 when available.

        Raises:
            FileNotFoundError: if the scaler file or model weights are missing.
        """
        # 1. Hyper-parameters (must stay consistent with the training args.py).
        self.seq_len = 10        # input sequence length (time steps)
        self.feature_num = 42    # input features: 4 time encodings + 38 sensor features
        self.labels_num = 4      # number of output target variables
        self.hidden_size = 64
        self.num_layers = 1
        self.output_size = 5     # forecast horizon: 5 future steps
        self.dropout = 0
        # 2. Device selection and resource loading.
        self.device = device if device else torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model_path = model_path
        self.scaler_path = scaler_path
        # Load the fitted scaler; fail fast if training artifacts are absent.
        if not os.path.exists(self.scaler_path):
            raise FileNotFoundError(f"未找到归一化文件: {self.scaler_path},请确保已完成训练。")
        self.scaler = joblib.load(self.scaler_path)
        # Load model weights (and the optional graph edge index).
        self._load_model()
        # Required input columns (39 incl. 'index'). ORDER IS FIXED — after
        # dropping 'index' it must match the column order the scaler was fit on.
        self.required_columns = [
            'index',
            "AR.1#UF_JSFLOW_O",      # 1#UF inlet flow
            "AR.2#UF_JSFLOW_O",      # 2#UF inlet flow
            "AR.1#RO_JSFLOW_O",      # 1#RO inlet flow
            "AR.2#RO_JSFLOW_O",      # 2#RO inlet flow
            "AR.1#UF_JSPRESS_O",     # 1#UF inlet pressure
            "AR.2#UF_JSPRESS_O",     # 2#UF inlet pressure
            "AR.1#RO_JSPRESS_O",     # 1#RO inlet pressure
            "AR.2#RO_JSPRESS_O",     # 2#RO inlet pressure
            "AR.1#RO_EDJSPRESS_O",   # 1#RO second-stage inlet pressure
            "AR.1#RO_SDJSPRESS_O",   # 1#RO third-stage inlet pressure
            "AR.2#RO_EDJSPRESS_O",   # 2#RO second-stage inlet pressure
            "AR.2#RO_SDJSPRESS_O",   # 2#RO third-stage inlet pressure
            "AR.ZJS_TEMP_O",         # inlet water temperature
            "AR.ZJS_ZD_O",           # UF inlet turbidity
            "AR.RO_JSDD_O",          # RO inlet conductivity
            "AR.RO_JSORP_O",         # RO inlet ORP
            "AR.RO_JSPH_O",          # RO inlet pH
            "AR.1#UF_V_FB_O",        # 1#UF regulating-valve opening feedback
            "AR.2#UF_V_FB_O",        # 2#UF regulating-valve opening feedback
            "AR.1#UFBWB_FRE_FB_O",   # 1#UF backwash pump frequency feedback
            "AR.2#UFBWB_FRE_FB_O",   # 2#UF backwash pump frequency feedback
            "AR.1#RODJB_FRE_FB_O",   # 1#RO interstage pump frequency feedback
            "AR.1#ROGYB_FRE_FB_O",   # 1#RO high-pressure pump frequency feedback
            "AR.1#RODJB_CZ_O",       # 1#RO interstage pump vibration feedback
            "AR.1#ROGYB_CZ_O",       # 1#RO high-pressure pump vibration feedback
            "AR.2#RODJB_CZ_O",       # 2#RO interstage pump vibration feedback
            "AR.2#ROGYB_CZ_O",       # 2#RO high-pressure pump vibration feedback
            "AR.ROGSB_FRE_FB_O",     # RO supply pump frequency feedback
            "AR.UFGSB_FRE_FB_O",     # UF supply pump frequency feedback
            "AR.V_UF1_TJV_KD_FB",    # UF1 regulating-valve opening feedback
            "AR.V_UF2_TJV_KD_FB",    # UF2 regulating-valve opening feedback
            "AR.CS_LEVEL_O",         # RO product-water tank level
            "AR.UF_CSLEVEL_O",       # UF product-water tank level
            "AR.UF1_SSD_KMYC",       # UF1 transmembrane pressure differential
            "AR.UF2_SSD_KMYC",       # UF2 transmembrane pressure differential
            "AR.RO1_2D_YC",          # RO1 second-stage pressure differential
            "AR.PUBLIC_BY_REAL_1",   # RO1 third-stage pressure differential
            "1#RO_CSFLOW",           # 1#RO product-water flow
        ]
        # Raw input cached by predict() for the NaN/inf fallback path.
        self.raw_input_data = None
        # The last `labels_num` required columns are the prediction targets.
        self.target_columns = self.required_columns[-self.labels_num:]

    def _load_model(self):
        """Build the GAT_LSTM with training-time hyper-parameters and load weights.

        Raises:
            FileNotFoundError: if the model weights file does not exist.
        """
        # Lightweight namespace mirroring the training-time args object.
        class ModelArgs: pass
        args = ModelArgs()
        args.feature_num = self.feature_num
        args.hidden_size = self.hidden_size
        args.num_layers = self.num_layers
        args.output_size = self.output_size
        args.labels_num = self.labels_num
        args.dropout = self.dropout
        self.model = GAT_LSTM(args).to(self.device)
        # Graph connectivity is optional: only restored when edge_index.pt
        # exists in the working directory.
        if os.path.exists('edge_index.pt'):
            edge_index = torch.load('edge_index.pt', map_location=self.device, weights_only=True)
            self.model.set_edge_index(edge_index)
        if not os.path.exists(self.model_path):
            raise FileNotFoundError(f"未找到模型权重文件: {self.model_path}")
        state_dict = torch.load(self.model_path, map_location=self.device, weights_only=True)
        self.model.load_state_dict(state_dict)
        self.model.eval()  # inference mode: disables dropout, etc.

    def _preprocess(self, df: pd.DataFrame) -> np.ndarray:
        """Preprocess raw input: normalize the timestamp column, pad short
        input, order/clean feature columns, derive cyclic time encodings,
        and apply the training-time scaler.

        Args:
            df: DataFrame with the required sensor columns plus a 'datetime'
                or 'index' timestamp column (synthesized if absent).

        Returns:
            2-D numpy array of scaled features, columns ordered as
            [4 time encodings, 38 sensor features].

        Raises:
            ValueError: if required sensor columns are missing.
        """
        data = df.copy()
        # 1. Normalize the timestamp column name to 'index'.
        if 'datetime' in data.columns:
            data = data.rename(columns={'datetime': 'index'})
        if 'index' not in data.columns:
            # No timestamps supplied: synthesize a minute-spaced range ending now.
            data['index'] = pd.date_range(end=datetime.now(), periods=len(data), freq='min')
        data['index'] = pd.to_datetime(data['index'])
        # NOTE(review): rows are assumed to already be in chronological order —
        # nothing here sorts by 'index' (only the fallback path sorts).
        # 2. Pad to seq_len by repeating the first row with back-dated timestamps.
        if len(data) < self.seq_len:
            pad_len = self.seq_len - len(data)
            first_row = data.iloc[0:1]
            pads = pd.concat([first_row] * pad_len, ignore_index=True)
            start_time = data['index'].iloc[0]
            for i in range(pad_len):
                # Back-date each pad row by whole minutes before the first real row.
                pads.at[i, 'index'] = start_time - timedelta(minutes=(pad_len-i))
            data = pd.concat([pads, data], ignore_index=True)
        # 3. Select and order the sensor columns (everything after 'index').
        try:
            # required_columns[0] is 'index'; take the sensor columns after it.
            business_cols = self.required_columns[1:]
            data_business = data[business_cols].copy()
            # NaN strategy: forward fill -> backward fill -> fill with 0.
            data_business = data_business.ffill().bfill().fillna(0.0)
        except KeyError:
            missing = list(set(self.required_columns) - set(data.columns))
            raise ValueError(f"缺少列: {missing}")
        # 4. Cyclic time-of-day (1440 min) and day-of-year (366 d) encodings.
        date_col = data['index']
        minute_of_day = date_col.dt.hour * 60 + date_col.dt.minute
        day_of_year = date_col.dt.dayofyear
        time_features = pd.DataFrame({
            'minute_sin': np.sin(2 * np.pi * minute_of_day / 1440),
            'minute_cos': np.cos(2 * np.pi * minute_of_day / 1440),
            'day_year_sin': np.sin(2 * np.pi * day_of_year / 366),
            'day_year_cos': np.cos(2 * np.pi * day_of_year / 366)
        })
        # 5. Concatenate [time features + sensor features].
        # Training used the same order: time features first, then the rest.
        # Indices must be reset so the positional concat does not misalign rows.
        data_to_scale = pd.concat([
            time_features.reset_index(drop=True),
            data_business.reset_index(drop=True)
        ], axis=1)
        # 6. Scale everything in one pass.
        # Column names and order must match exactly what the scaler was fit on.
        scaled_array = self.scaler.transform(data_to_scale)
        return scaled_array

    # --- Fallback used when the model output contains NaN/inf ---
    def get_recent_values_as_fallback(self) -> np.ndarray:
        """Return the newest `output_size` observed target rows from the raw
        input as a stand-in prediction, so predict() never emits NaNs.

        Returns:
            numpy array of shape (output_size, labels_num); all zeros when no
            usable raw input is cached.
        """
        if self.raw_input_data is None or self.raw_input_data.empty:
            return np.zeros((self.output_size, self.labels_num))
        df_copy = self.raw_input_data.copy()
        # Normalize the timestamp column the same way _preprocess does.
        if 'datetime' in df_copy.columns:
            df_copy = df_copy.rename(columns={'datetime': 'index'})
        if 'index' not in df_copy.columns:
            df_copy['index'] = pd.date_range(end=datetime.now(), periods=len(df_copy), freq='min')
        df_copy['index'] = pd.to_datetime(df_copy['index'])
        # Sort chronologically and keep the newest `output_size` rows.
        recent_data = df_copy.sort_values('index').tail(self.output_size)
        # Too few rows: repeat the last one (or an all-zero row when empty).
        if len(recent_data) < self.output_size:
            last_row = recent_data.iloc[-1:] if not recent_data.empty else pd.DataFrame(
                {col: [0.0] for col in self.target_columns}, index=[0])
            while len(recent_data) < self.output_size:
                recent_data = pd.concat([recent_data, last_row], ignore_index=True)
        # Scrub any remaining NaNs from the target columns.
        recent_data[self.target_columns] = recent_data[self.target_columns].ffill().bfill().fillna(0.0)
        # Extract the target values.
        try:
            fallback_values = recent_data[self.target_columns].values
        except KeyError:
            # Extreme case: the input lacked the target columns entirely.
            fallback_values = np.zeros((self.output_size, self.labels_num))
        return fallback_values

    def predict(self, df: pd.DataFrame) -> list:
        """Run one multi-step prediction.

        Args:
            df: recent sensor readings containing the required columns.

        Returns:
            List[List[float]] of shape (output_size, labels_num):
            [[4 values at t+1], [4 values at t+2], ..., [4 values at t+5]].
        """
        # Cache raw input for the NaN/inf fallback path.
        self.raw_input_data = df.copy()
        # 1. Preprocess (returns the scaled numpy array).
        processed_data = self._preprocess(df)
        # 2. Build a (1, seq_len, feature_num) tensor from the last seq_len steps.
        input_seq = processed_data[-self.seq_len:]
        input_tensor = torch.tensor(input_seq, dtype=torch.float32).unsqueeze(0).to(self.device)
        # 3. Inference.
        with torch.no_grad():
            output = self.model(input_tensor)
        # 4. Inverse scaling.
        # Reshape flat output to (output_size, labels_num) = (5, 4).
        preds = output.cpu().numpy().reshape(self.output_size, self.labels_num)
        # Scaler params of the last `labels_num` columns (the targets);
        # (x - min_) / scale_ inverts MinMaxScaler's x_scaled = x*scale_ + min_.
        target_min = self.scaler.min_[-self.labels_num:]
        target_scale = self.scaler.scale_[-self.labels_num:]
        real_preds = (preds - target_min) / target_scale
        # Negatives are folded positive — presumably because the targets are
        # non-negative physical quantities. NOTE(review): confirm abs() is
        # intended here rather than clipping at 0.
        real_preds = np.abs(real_preds)
        # --- NaN/inf detection and fallback ---
        # If the model produced NaN or inf, substitute recent observed values.
        if np.isnan(real_preds).any() or np.isinf(real_preds).any():
            real_preds = self.get_recent_values_as_fallback()
        # 5. Plain nested-list output.
        return real_preds.tolist()
  206. if __name__ == "__main__":
  207. # 测试代码
  208. try:
  209. # 初始化
  210. predictor = RealTimePredictor()
  211. # 生成模拟数据
  212. mock_data = pd.DataFrame()
  213. mock_data['index'] = pd.date_range(end=datetime.now(), periods=15, freq='min')
  214. for col in predictor.required_columns[1:]:
  215. mock_data[col] = np.random.rand(15) * 10
  216. # 人为制造空值测试鲁棒性
  217. mock_data.loc[3:6, "AR.1#UF_JSFLOW_O"] = np.nan
  218. mock_data.loc[12, predictor.target_columns[0]] = np.nan
  219. # 预测
  220. result = predictor.predict(mock_data)
  221. print("预测结果 (5x4 数组):")
  222. print(result)
  223. except Exception as e:
  224. print(f"Error: {e}")
  225. import traceback
  226. traceback.print_exc()