DQN_env.py

import os
import time
import random
import numpy as np
import gymnasium as gym
from gymnasium import spaces
from stable_baselines3 import DQN
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.callbacks import BaseCallback
from typing import Dict, Tuple, Optional
import torch
import torch.nn as nn
from dataclasses import dataclass, asdict
from save_uf_models import TMPIncreaseModel, TMPDecreaseModel  # import the surrogate model classes
import copy

# ==== Base membrane operating parameters ====
@dataclass
class UFParams:
    # -- Membrane and operation --
    q_UF: float = 360.0             # filtration feed flow (m^3/h)
    TMP0: float = 0.03              # initial TMP (MPa)
    TMP_max: float = 0.06           # hard upper limit on TMP (MPa)
    # -- Membrane fouling kinetics --
    alpha: float = 1e-6             # TMP growth coefficient
    belta: float = 1.1              # power-law exponent
    # -- Backwash parameters (fixed) --
    q_bw_m3ph: float = 1000.0       # physical backwash flow (m^3/h)
    # -- CEB parameters (fixed) --
    T_ceb_interval_h: float = 48.0  # perform a CEB every fixed k hours
    v_ceb_m3: float = 30.0          # CEB water volume (m^3)
    t_ceb_s: float = 40 * 60.0      # CEB duration (s)
    phi_ceb: float = 1.0            # CEB removal fraction (simplified: full recovery to TMP0)
    # -- Constraints and convergence --
    dTMP: float = 0.001             # max TMP rise relative to TMP0 at the end of one filtration run (MPa)
    # -- Search ranges (seconds) --
    L_min_s: float = 3800.0         # lower bound on filtration duration (s)
    L_max_s: float = 6000.0         # upper bound on filtration duration (s)
    t_bw_min_s: float = 40.0        # lower bound on backwash duration (s)
    t_bw_max_s: float = 60.0        # upper bound on backwash duration (s)
    # -- Physical backwash recovery-function parameters --
    phi_bw_min: float = 0.7         # minimum backwash removal fraction
    phi_bw_max: float = 1.0         # maximum backwash removal fraction
    L_ref_s: float = 4000.0         # time scale for the effect of filtration duration
    tau_bw_s: float = 20.0          # time scale for the effect of backwash duration
    gamma_t: float = 1.0            # exponent for the effect of backwash duration
    # -- Grid --
    L_step_s: float = 60.0          # filtration duration step (s)
    t_bw_step_s: float = 5.0        # backwash duration step (s)
    # Multi-objective weights and high-TMP penalty
    w_rec: float = 0.8              # recovery weight
    w_rate: float = 0.2             # net delivery rate weight
    w_headroom: float = 0.2         # near-limit (headroom) penalty weight
    r_headroom: float = 2.0         # headroom penalty exponent
    headroom_hardcap: float = 0.98  # ratios above this are rejected outright

# ==== Reinforcement learning hyperparameters ====
@dataclass
class DQNParams:
    """
    DQN hyperparameter definition class.
    Used to manage the model training parameters in one place.
    """
    # Learning rate; controls the step size of neural-network updates
    learning_rate: float = 1e-4
    # Replay buffer size (in steps)
    buffer_size: int = 10000
    # Number of steps to collect before learning starts
    learning_starts: int = 200
    # Number of samples drawn from the replay buffer per update
    batch_size: int = 32
    # Discount factor; the closer to 1, the more weight on long-term reward
    gamma: float = 0.95
    # Train once every this many steps
    train_freq: int = 4
    # Target network update interval
    target_update_interval: int = 2000
    # Initial exploration rate epsilon
    exploration_initial_eps: float = 1.0
    # Fraction of training over which epsilon decays from initial to final
    exploration_fraction: float = 0.3
    # Final exploration rate epsilon
    exploration_final_eps: float = 0.02
    # Log remark (used to distinguish experiments)
    remark: str = "default"

# ==== Load the simulation-environment surrogate models ====
# Instantiate the models
model_fp = TMPIncreaseModel()
model_bw = TMPDecreaseModel()
# Load the weights
model_fp.load_state_dict(torch.load("uf_fp.pth"))
model_bw.load_state_dict(torch.load("uf_bw.pth"))
# Switch to inference mode
model_fp.eval()
model_bw.eval()

def _delta_tmp(p, L_h: float) -> float:
    """
    TMP rise over one filtration run: delegates to the uf_fp.pth model.
    """
    return model_fp(p, L_h)

def phi_bw_of(p, L_s: float, t_bw_s: float) -> float:
    """
    Backwash removal fraction: delegates to the uf_bw.pth model.
    """
    return model_bw(p, L_s, t_bw_s)

def _tmp_after_ceb(p, L_s: float, t_bw_s: float) -> float:
    """
    TMP after a chemically enhanced backwash (CEB); currently restores the initial TMP.
    """
    return p.TMP0

def _v_bw_m3(p, t_bw_s: float) -> float:
    """
    Water consumed by one physical backwash (m^3).
    """
    return float(p.q_bw_m3ph * (float(t_bw_s) / 3600.0))
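
# --- Illustrative smoke test (a sketch, not part of the original pipeline) ---
# Exercises the surrogate-model helpers above once, assuming the (params, scalar)
# call signatures used in this file and scalar-like return values from
# TMPIncreaseModel / TMPDecreaseModel. _smoke_test_helpers is a hypothetical helper.
def _smoke_test_helpers(p: Optional[UFParams] = None) -> None:
    p = p or UFParams()
    with torch.no_grad():  # inference only; no gradient tracking needed
        print("dTMP over a 1 h filtration run:", _delta_tmp(p, 1.0))
        print("Backwash removal fraction phi:", phi_bw_of(p, 4000.0, 50.0))
    print("Backwash water use for 50 s (m^3):", _v_bw_m3(p, 50.0))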

def simulate_one_supercycle(p: UFParams, L_s: float, t_bw_s: float):
    """
    Returns (feasible, metrics dict).
    - Supports a dynamic number of backwash cycles per CEB: CEB at a fixed 48 h interval.
    - Adds daily production time and specific energy consumption per m^3.
    - Adds the minimum-TMP record.
    """
    L_h = float(L_s) / 3600.0  # filtration time of one small cycle (h)
    tmp = p.TMP0
    max_tmp_during_filtration = tmp
    min_tmp_during_filtration = tmp  # new: initialize the minimum TMP
    max_residual_increase = 0.0
    # Total duration of one small cycle (h)
    t_small_cycle_h = (L_s + t_bw_s) / 3600.0
    # Number of small cycles per supercycle (between CEBs)
    k_bw_per_ceb = int(np.floor(p.T_ceb_interval_h / t_small_cycle_h))
    if k_bw_per_ceb < 1:
        k_bw_per_ceb = 1  # at least one small cycle
    # Lookup table for specific energy consumption (kWh/m^3), keyed by filtration duration (s)
    energy_lookup = {
        3600: 0.1034, 3660: 0.1031, 3720: 0.1029, 3780: 0.1026,
        3840: 0.1023, 3900: 0.1021, 3960: 0.1019, 4020: 0.1017,
        4080: 0.1015, 4140: 0.1012, 4200: 0.1011
    }
    for _ in range(k_bw_per_ceb):
        tmp_run_start = tmp
        # TMP growth during filtration
        dtmp = _delta_tmp(p, L_h)
        tmp_peak = tmp_run_start + dtmp
        # Constraint 1: the peak must not exceed the hard upper limit
        if tmp_peak > p.TMP_max + 1e-12:
            return False, {"reason": "TMP_max violated during filtration", "TMP_peak": tmp_peak}
        # Update the maximum and minimum TMP
        if tmp_peak > max_tmp_during_filtration:
            max_tmp_during_filtration = tmp_peak
        if tmp_run_start < min_tmp_during_filtration:  # new: record the minimum TMP at run start
            min_tmp_during_filtration = tmp_run_start
        # Physical backwash
        phi = phi_bw_of(p, L_s, t_bw_s)
        tmp_after_bw = tmp_peak - phi * (tmp_peak - tmp_run_start)
        # Constraint 2: limit the residual increase per run
        residual_inc = tmp_after_bw - tmp_run_start
        if residual_inc > p.dTMP + 1e-12:
            return False, {
                "reason": "residual TMP increase after BW exceeded dTMP",
                "residual_increase": residual_inc,
                "limit_dTMP": p.dTMP
            }
        if residual_inc > max_residual_increase:
            max_residual_increase = residual_inc
        tmp = tmp_after_bw
    # CEB
    tmp_after_ceb = p.TMP0
    # Volumes and recovery
    V_feed_super = k_bw_per_ceb * p.q_UF * L_h
    V_loss_super = k_bw_per_ceb * _v_bw_m3(p, t_bw_s) + p.v_ceb_m3
    V_net = max(0.0, V_feed_super - V_loss_super)
    recovery = max(0.0, V_net / max(V_feed_super, 1e-12))
    # Time and net delivery rate
    T_super_h = k_bw_per_ceb * (L_s + t_bw_s) / 3600.0 + p.t_ceb_s / 3600.0
    net_delivery_rate_m3ph = V_net / max(T_super_h, 1e-12)
    # Headroom ratio and hard cap
    headroom_ratio = max_tmp_during_filtration / max(p.TMP_max, 1e-12)
    if headroom_ratio > p.headroom_hardcap + 1e-12:
        return False, {"reason": "headroom hardcap exceeded", "headroom_ratio": headroom_ratio}
    # -- New metric 1: daily production time (h/d) --
    daily_prod_time_h = k_bw_per_ceb * L_h / T_super_h * 24.0
    # -- New metric 2: specific energy consumption (kWh/m^3) --
    closest_L = min(energy_lookup.keys(), key=lambda x: abs(x - L_s))
    ton_water_energy = energy_lookup[closest_L]
    info = {
        "recovery": recovery,
        "V_feed_super_m3": V_feed_super,
        "V_loss_super_m3": V_loss_super,
        "V_net_super_m3": V_net,
        "supercycle_time_h": T_super_h,
        "net_delivery_rate_m3ph": net_delivery_rate_m3ph,
        "max_TMP_during_filtration": max_tmp_during_filtration,
        "min_TMP_during_filtration": min_tmp_during_filtration,  # new: minimum TMP
        "max_residual_increase_per_run": max_residual_increase,
        "phi_bw_effective": phi,
        "TMP_after_ceb": tmp_after_ceb,
        "headroom_ratio": headroom_ratio,
        "daily_prod_time_h": daily_prod_time_h,
        "ton_water_energy_kWh_per_m3": ton_water_energy,
        "k_bw_per_ceb": k_bw_per_ceb
    }
    return True, info
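
# --- Usage sketch (illustrative, not part of the original pipeline) ---
# Evaluates a single operating point with simulate_one_supercycle() and prints a few
# of the reported metrics; the dictionary keys come straight from the function above.
# _example_evaluate_operating_point and its default setpoints are hypothetical.
def _example_evaluate_operating_point(L_s: float = 4200.0, t_bw_s: float = 50.0) -> None:
    p = UFParams()
    feasible, info = simulate_one_supercycle(p, L_s, t_bw_s)
    if feasible:
        print("recovery:", info["recovery"])
        print("net delivery rate (m^3/h):", info["net_delivery_rate_m3ph"])
        print("max TMP during filtration (MPa):", info["max_TMP_during_filtration"])
    else:
        print("infeasible:", info.get("reason"))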

def _score(p: UFParams, rec: dict) -> float:
    """Composite score: higher is better. Nonlinear amplification widens the gap between good and bad actions."""
    # -- Dimensionless net delivery rate --
    rate_norm = rec["net_delivery_rate_m3ph"] / max(p.q_UF, 1e-12)
    # -- TMP soft penalty (sigmoid) --
    tmp_ratio = rec["max_TMP_during_filtration"] / max(p.TMP_max, 1e-12)
    k = 10.0
    headroom_penalty = 1.0 / (1.0 + np.exp(-k * (tmp_ratio - 1.0)))
    # -- Base reward (roughly 0.6-0.9) --
    base_reward = (
        p.w_rec * rec["recovery"]
        + p.w_rate * rate_norm
        - p.w_headroom * headroom_penalty
    )
    # -- Nonlinear amplification: squared mapping plus scaling --
    # Widens the gap between good and bad actions while capping the magnitude
    # to avoid excessive TD errors.
    amplified_reward = (base_reward - 0.5) ** 2 * 5.0
    # -- Optional: keep the sign to distinguish negative rewards --
    if base_reward < 0.5:
        amplified_reward = -amplified_reward
    return amplified_reward
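
# --- Worked example of the reward shaping above (illustrative only) ---
# Base rewards are centred at 0.5, squared, scaled by 5, and re-signed, so the gap
# between good and mediocre actions widens: base 0.9 -> +0.80, 0.6 -> +0.05, 0.4 -> -0.05.
# _example_amplification is a hypothetical demonstration helper.
def _example_amplification() -> None:
    for base in (0.9, 0.6, 0.4):
        amplified = (base - 0.5) ** 2 * 5.0
        if base < 0.5:
            amplified = -amplified
        print(f"base={base:.1f} -> amplified={amplified:+.2f}")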

def set_global_seed(seed: int):
    """Fix all global random seeds so training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # only matters when a GPU is used
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

class UFSuperCycleEnv(gym.Env):
    """Ultrafiltration system environment (decisions at the supercycle level)."""
    metadata = {"render_modes": ["human"]}

    def __init__(self, base_params, max_episode_steps: int = 20):
        super(UFSuperCycleEnv, self).__init__()
        self.base_params = base_params
        self.current_params = copy.deepcopy(base_params)
        self.max_episode_steps = max_episode_steps
        self.current_step = 0
        # Build the discrete action grid
        self.L_values = np.arange(
            self.base_params.L_min_s,
            self.base_params.L_max_s + self.base_params.L_step_s,
            self.base_params.L_step_s
        )
        self.t_bw_values = np.arange(
            self.base_params.t_bw_min_s,
            self.base_params.t_bw_max_s + self.base_params.t_bw_step_s,
            self.base_params.t_bw_step_s
        )
        self.num_L = len(self.L_values)
        self.num_bw = len(self.t_bw_values)
        # Single discrete action space
        self.action_space = spaces.Discrete(self.num_L * self.num_bw)
        # Observation: TMP0, the previous action (L_s, t_bw_s), and the max TMP of the current cycle.
        # All normalization is handled inside _get_obs.
        self.observation_space = spaces.Box(
            low=np.zeros(4, dtype=np.float32),
            high=np.ones(4, dtype=np.float32),
            dtype=np.float32
        )
        # Initialize state
        self.last_action = (self.base_params.L_min_s, self.base_params.t_bw_min_s)
        self.max_TMP_during_filtration = self.current_params.TMP0
        self.reset(seed=None)

    def _get_obs(self):
        TMP0 = self.current_params.TMP0
        TMP0_norm = (TMP0 - 0.01) / (0.05 - 0.01)
        L_s, t_bw_s = self.last_action
        L_norm = (L_s - self.base_params.L_min_s) / (self.base_params.L_max_s - self.base_params.L_min_s)
        t_bw_norm = (t_bw_s - self.base_params.t_bw_min_s) / (self.base_params.t_bw_max_s - self.base_params.t_bw_min_s)
        max_TMP_norm = (self.max_TMP_during_filtration - 0.01) / (0.05 - 0.01)
        obs = np.array([TMP0_norm, L_norm, t_bw_norm, max_TMP_norm], dtype=np.float32)
        # Clip into [0, 1] so observations stay inside the declared Box bounds
        # (max TMP can approach TMP_max, which maps slightly above 1 with this scaling).
        return np.clip(obs, 0.0, 1.0)

    def _get_action_values(self, action):
        L_idx = action // self.num_bw
        t_bw_idx = action % self.num_bw
        return self.L_values[L_idx], self.t_bw_values[t_bw_idx]

    def reset(self, seed=None, options=None):
        super().reset(seed=seed)
        self.current_params.TMP0 = np.random.uniform(0.01, 0.03)
        self.current_step = 0
        self.last_action = (self.base_params.L_min_s, self.base_params.t_bw_min_s)
        self.max_TMP_during_filtration = self.current_params.TMP0
        return self._get_obs(), {}

    def step(self, action):
        self.current_step += 1
        L_s, t_bw_s = self._get_action_values(action)
        L_s = np.clip(L_s, self.base_params.L_min_s, self.base_params.L_max_s)
        t_bw_s = np.clip(t_bw_s, self.base_params.t_bw_min_s, self.base_params.t_bw_max_s)
        # Simulate one supercycle
        feasible, info = simulate_one_supercycle(self.current_params, L_s, t_bw_s)
        if feasible:
            reward = _score(self.current_params, info)
            self.current_params.TMP0 = info["TMP_after_ceb"]
            self.max_TMP_during_filtration = info["max_TMP_during_filtration"]
            terminated = False
        else:
            reward = -20
            terminated = True
        truncated = self.current_step >= self.max_episode_steps
        self.last_action = (L_s, t_bw_s)
        next_obs = self._get_obs()
        info["feasible"] = feasible
        info["step"] = self.current_step
        return next_obs, reward, terminated, truncated, info
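
# --- Environment rollout sketch (illustrative, not part of the original pipeline) ---
# Samples random discrete actions, decodes them with _get_action_values(), and steps
# the environment until termination or truncation, printing the decoded setpoints.
# _example_random_rollout is a hypothetical demonstration helper.
def _example_random_rollout(n_steps: int = 5, seed: int = 0) -> None:
    env = UFSuperCycleEnv(UFParams())
    obs, _ = env.reset(seed=seed)
    for _ in range(n_steps):
        action = env.action_space.sample()
        L_s, t_bw_s = env._get_action_values(action)
        obs, reward, terminated, truncated, info = env.step(action)
        print(f"action={action} -> L={L_s:.0f} s, t_bw={t_bw_s:.0f} s, reward={float(reward):.3f}")
        if terminated or truncated:
            break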

class UFEpisodeRecorder:
    """Records the decisions and outcomes within each episode."""

    def __init__(self):
        self.episode_data = []
        self.current_episode = []

    def record_step(self, obs, action, reward, done, info):
        """Record one step."""
        step_data = {
            "obs": obs.copy(),
            # actions may arrive as numpy values or plain ints; copy only when possible
            "action": action.copy() if hasattr(action, "copy") else action,
            "reward": reward,
            "done": done,
            "info": info.copy() if info else {}
        }
        self.current_episode.append(step_data)
        if done:
            self.episode_data.append(self.current_episode)
            self.current_episode = []

    def get_episode_stats(self, episode_idx=-1):
        """Return summary statistics for one episode."""
        if not self.episode_data:
            return {}
        episode = self.episode_data[episode_idx]
        total_reward = sum(step["reward"] for step in episode)
        recoveries = [step["info"].get("recovery", 0) for step in episode if "recovery" in step["info"]]
        # Guard against episodes with no feasible step (np.mean of an empty list is NaN)
        avg_recovery = float(np.mean(recoveries)) if recoveries else 0.0
        feasible_steps = sum(1 for step in episode if step["info"].get("feasible", False))
        return {
            "total_reward": total_reward,
            "avg_recovery": avg_recovery,
            "feasible_steps": feasible_steps,
            "total_steps": len(episode)
        }

class UFTrainingCallback(BaseCallback):
    """
    Training callback (used here with DQN) that records every step into the recorder.
    Compared with the original RecordingCallback it is more robust:
    1. It does not depend on the environment's internal last_* attributes.
    2. It uses the obs, actions, rewards, dones and infos exposed by the algorithm's rollout collection.
    3. It handles end-of-episode statistics automatically.
    """

    def __init__(self, recorder, verbose=0):
        super(UFTrainingCallback, self).__init__(verbose)
        self.recorder = recorder

    def _on_step(self) -> bool:
        try:
            new_obs = self.locals.get("new_obs")
            actions = self.locals.get("actions")
            rewards = self.locals.get("rewards")
            dones = self.locals.get("dones")
            infos = self.locals.get("infos")
            if new_obs is not None and len(new_obs) > 0:
                step_obs = new_obs[0]
                step_action = actions[0] if actions is not None else None
                step_reward = rewards[0] if rewards is not None else 0.0
                step_done = dones[0] if dones is not None else False
                step_info = infos[0] if infos is not None else {}
                # Print information about the current step
                if self.verbose:
                    print(f"[Step {self.num_timesteps}] action={step_action}, reward={step_reward:.3f}, done={step_done}")
                # Record the data
                self.recorder.record_step(
                    obs=step_obs,
                    action=step_action,
                    reward=step_reward,
                    done=step_done,
                    info=step_info,
                )
        except Exception as e:
            if self.verbose:
                print(f"[Callback Error] {e}")
        return True

class DQNTrainer:
    def __init__(self, env, params, callback=None):
        self.env = env
        self.params = params
        self.callback = callback
        self.log_dir = self._create_log_dir()
        self.model = self._create_model()

    def _create_log_dir(self):
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        log_name = (
            f"DQN_lr{self.params.learning_rate}_buf{self.params.buffer_size}_bs{self.params.batch_size}"
            f"_gamma{self.params.gamma}_exp{self.params.exploration_fraction}"
            f"_{self.params.remark}_{timestamp}"
        )
        log_dir = os.path.join("./uf_dqn_tensorboard", log_name)
        os.makedirs(log_dir, exist_ok=True)
        return log_dir

    def _create_model(self):
        return DQN(
            policy="MlpPolicy",
            env=self.env,
            learning_rate=self.params.learning_rate,
            buffer_size=self.params.buffer_size,  # a large buffer keeps the experience diverse
            learning_starts=self.params.learning_starts,
            batch_size=self.params.batch_size,
            gamma=self.params.gamma,
            train_freq=self.params.train_freq,
            # Soft target updates: update every step with tau=0.005, bypassing the
            # hard-update interval defined in DQNParams.target_update_interval.
            target_update_interval=1,
            tau=0.005,
            exploration_initial_eps=self.params.exploration_initial_eps,
            exploration_fraction=self.params.exploration_fraction,
            exploration_final_eps=self.params.exploration_final_eps,
            verbose=1,
            tensorboard_log=self.log_dir
            # replay_buffer_class is not specified; the default ReplayBuffer is used
        )

    def train(self, total_timesteps: int):
        if self.callback:
            self.model.learn(total_timesteps=total_timesteps, callback=self.callback)
        else:
            self.model.learn(total_timesteps=total_timesteps)
        print(f"Training finished; logs saved to: {self.log_dir}")

    def save(self, path=None):
        if path is None:
            path = os.path.join(self.log_dir, "dqn_model.zip")
        self.model.save(path)
        print(f"Model saved to: {path}")

    def load(self, path):
        self.model = DQN.load(path, env=self.env)
        print(f"Model loaded from {path}")

def train_uf_rl_agent(params: UFParams, total_timesteps: int = 10000, seed: int = 2025):
    set_global_seed(seed)
    recorder = UFEpisodeRecorder()
    callback = UFTrainingCallback(recorder, verbose=1)

    def make_env():
        env = UFSuperCycleEnv(params)
        env = Monitor(env)
        return env

    env = DummyVecEnv([make_env])
    dqn_params = DQNParams()
    trainer = DQNTrainer(env, dqn_params, callback=callback)
    trainer.train(total_timesteps)
    trainer.save()
    stats = callback.recorder.get_episode_stats()
    print(f"Training complete - total reward: {stats.get('total_reward', 0):.2f}, "
          f"average recovery: {stats.get('avg_recovery', 0):.3f}")
    return trainer.model
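
# --- Evaluation sketch (illustrative assumption, not part of the original pipeline) ---
# Loads a saved DQN checkpoint and runs one greedy episode on a fresh environment.
# evaluate_uf_rl_agent and its model_path argument are hypothetical; point model_path
# at the file written by DQNTrainer.save().
def evaluate_uf_rl_agent(model_path: str, params: UFParams, n_steps: int = 20) -> float:
    env = UFSuperCycleEnv(params, max_episode_steps=n_steps)
    model = DQN.load(model_path)
    obs, _ = env.reset(seed=0)
    total_reward = 0.0
    for _ in range(n_steps):
        action, _ = model.predict(obs, deterministic=True)  # greedy (no exploration) action
        obs, reward, terminated, truncated, info = env.step(int(action))
        total_reward += float(reward)
        if terminated or truncated:
            break
    return total_reward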

# Training and testing example
if __name__ == "__main__":
    # Initialize parameters
    params = UFParams()
    # Train the RL agent
    print("Starting RL agent training...")
    train_uf_rl_agent(params, total_timesteps=50000)