2 commits e273c714eb ... c694f02849

Author SHA1 Message Date
  junc_WHU c694f02849 feat: invocation code for the real-factory-data test version 3 months ago
  junc_WHU eba5aa9521 feat: real-factory-data test version 3 months ago

+ 29 - 12
models/uf-rl/uf_train/env/env_params.py

@@ -191,12 +191,12 @@ class UFState:
     # ========== Membrane resistance model parameters ==========
     # These parameters describe the physicochemical fouling characteristics of the membrane, fitted from historical data

-    nuK: float = 6.92e+01
+    nuK: float = 1.7e+02
    # Resistance growth coefficient during the filtration phase (scaled units)
     # Note: reflects feed-water fouling potential; a larger nuK means worse water quality and faster membrane fouling
     # Physical meaning: resistance growth rate per unit membrane flux per unit time

-    slope: float = 3.44e-01
+    slope: float = 2
     # Irreversible-fouling growth slope over the full cycle
     # Note: describes the accumulation rate of long-term irreversible fouling (coefficient of the power-law model)

@@ -205,7 +205,7 @@ class UFState:
     # Note: describes the nonlinearity of long-term fouling (exponent of the power-law model)
     # power > 1 means fouling accumulates at an accelerating rate; power < 1 means the growth slows down

-    ceb_removal: float = 150
+    ceb_removal: float = 100
     # Membrane resistance removable by chemically enhanced backwash (CEB) (scaled units)
     # Note: CEB is more thorough than physical backwash and can remove part of the irreversible fouling
@@ -281,30 +281,47 @@ class UFActionSpec:
 @dataclass(frozen=True)
 class UFRewardParams:
     """
-    [Reward-function and safety-constraint parameters]
-    Used for reward computation and episode-termination checks.
+    [Reward-function and safety-constraint parameters] Used for reward computation and episode-termination checks.
     """

+    # ------------------ TMP limits ------------------
     global_TMP_hard_limit: float = 0.08
     # Hard upper limit on TMP (MPa)
     # Note: exceeding this value fails the episode and requires an immediate shutdown

     global_TMP_soft_limit: float = 0.06
     # Soft upper limit on TMP (MPa)
-    # Note: this limit guides the allowable membrane-resistance rise in the reward function; the closer to it, the more strictly the rise is controlled
+    # Note: this limit guides the allowable membrane-resistance rise in the reward function;
+    # the closer the system gets to it, the more strictly the resistance rise is controlled

+    # ------------------ Recovery-rate reward parameters ------------------
     k_rec: float = 5.0
     # Recovery-rate sensitivity coefficient (controls the steepness of the recovery reward)

-    k_res: float = 10.0
-    # Residual-fouling sensitivity coefficient (controls the steepness of the fouling penalty)
-
     rec_low: float = 0.92
     rec_high: float = 0.99
-    # Normal recovery-rate range
+    # Normal recovery-rate range, used to normalize the reward
+
+    # ------------------ Residual-fouling reward parameters ------------------
+    k_res: float = 10.0
+    # Residual-fouling sensitivity coefficient (controls the steepness of the fouling penalty)

     rr0: float = 0.08
-    # Residual-fouling ratio reference value
+    # Residual-fouling ratio reference value (reference point of the original design)
+    # In the new design the allowed per-step rise ratio is set by 1/max_episode_steps, which can override this value
+
+    # ------------------ Specific-energy (per m³) reward parameters ------------------
+    k_energy: float = 5.0
+    # Specific-energy sensitivity coefficient (controls the steepness of the energy reward/penalty)
+
+    energy_low: float = 0.0993
+    energy_high: float = 0.1034
+    energy_ref: float = 0.1011
+    # Energy-consumption reference values (kWh/m³)
+    # energy_low: low consumption, higher reward
+    # energy_high: high consumption, negative reward
+    # energy_ref: balance point, corresponding to a reward of 0
+
 
 @dataclass(frozen=True)
 class UFStateBounds:

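A quick numeric check of these reference points (a standalone sketch, not part of the commit): with the tanh shaping used in UFSuperCycleEnv._calculate_reward below, the reward crosses zero at the midpoint of [energy_low, energy_high], i.e. 0.10135 kWh/m³, slightly above the stated energy_ref = 0.1011, so the reward at energy_ref comes out mildly positive rather than exactly 0:

import numpy as np

# Values from UFRewardParams in this commit
k_energy, energy_low, energy_high, energy_ref = 5.0, 0.0993, 0.1034, 0.1011

def energy_reward(energy: float) -> float:
    # Mirrors the shaping in UFSuperCycleEnv._calculate_reward:
    # normalize to [0, 1]; the reward crosses zero at energy_norm = 0.5
    energy_norm = (energy - energy_low) / (energy_high - energy_low)
    return float(-np.tanh(k_energy * (energy_norm - 0.5)))

print(energy_reward(energy_low))   # ~ +0.987 (low consumption, strong bonus)
print(energy_reward(energy_ref))   # ~ +0.296 (mildly positive, not exactly 0)
print(energy_reward(energy_high))  # ~ -0.987 (high consumption, strong penalty)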
+ 15 - 1
models/uf-rl/uf_train/env/env_visual.py

@@ -2,6 +2,7 @@ import numpy as np
 from stable_baselines3.common.callbacks import BaseCallback
 
 
+
 class UFEpisodeRecorder:
     """记录episode中的决策和结果"""
 
@@ -69,10 +70,23 @@ class UFTrainingCallback(BaseCallback):
                 step_reward = rewards[0] if rewards is not None else 0.0
                 step_done = dones[0] if dones is not None else False
                 step_info = infos[0] if infos is not None else {}
+                L_s = step_info["L_s"]
+                t_bw_s = step_info["t_bw_s"]
+                initial_tmp = step_info["initial_tmp"]
+                max_TMP_during_filtration = step_info["max_TMP_during_filtration"]
+                tmp_after_ceb = step_info["tmp_after_ceb"]
+                residual_ratio = step_info["residual_ratio"]
+                rec_reward = step_info["rec_reward"]
+                energy_reward = step_info["energy_reward"]
+                recovery = step_info["recovery"]
+                res_penalty = step_info["res_penalty"]
 
                 # Print information for the current step
                 if self.verbose:
-                    print(f"[Step {self.num_timesteps}] action={step_action}, reward={step_reward:.3f}, done={step_done}")
+                    print(f"[Step {self.num_timesteps}] action={step_action}, reward={step_reward:.3f}, done={step_done}, "
+                          f"L_s={L_s}, t_bw_s={t_bw_s}, residual_ratio={residual_ratio:.4f}, recovery={recovery:.4f}, "
+                          f"rec_reward={rec_reward:.4f}, energy_reward={energy_reward:.4f}, res_penalty={res_penalty:.4f}, "
+                          f"initial_tmp={initial_tmp:.4f}, max_TMP_during_filtration={max_TMP_during_filtration:.4f}, tmp_after_ceb={tmp_after_ceb:.4f}")
 
                 # Record the data
                 self.recorder.record_step(

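Because rec_reward, energy_reward, and res_penalty are only meaningful on feasible steps, extracting them defensively keeps the logging robust. A minimal helper (a sketch, not part of the commit; step_info is the dict read above):

import math

def get_metric(info: dict, key: str) -> float:
    # Missing or None entries fall back to NaN, so formats like {x:.4f} never crash
    value = info.get(key)
    return float(value) if value is not None else math.nan

residual_ratio = get_metric(step_info, "residual_ratio")
rec_reward = get_metric(step_info, "rec_reward")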
+ 74 - 35
models/uf-rl/uf_train/env/uf_env.py

@@ -223,28 +223,28 @@ class UFSuperCycleEnv(gym.Env):
         self.last_action = None
         self.max_TMP_during_filtration = self.state.TMP
 
-        return self._get_obs(), {}
+        return self.get_obs(self.state), {}
 
     def _get_state_copy(self):
         return copy.deepcopy(self.state)
 
-    def _get_obs(self):
+    def get_obs(self, state):
         """
        Build the normalized state vector of the current environment
         """
-        # === 1. Read dynamic parameters from self.state ===
-        TMP = self.state.TMP
-        q_UF = self.state.q_UF
-        temp = self.state.temp
+        # === 1. Read dynamic parameters from state ===
+        TMP = state.TMP
+        q_UF = state.q_UF
+        temp = state.temp
 
         # === 2. Compute the initial membrane resistance for this cycle ===
-        R = self.state.R
+        R = state.R
 
         # === 3. Read the resistance-growth model parameters from state ===
-        nuk = self.state.nuK
-        slope = self.state.slope
-        power = self.state.power
-        ceb_removal = self.state.ceb_removal
+        nuk = state.nuK
+        slope = state.slope
+        power = state.power
+        ceb_removal = state.ceb_removal
 
         # === 4. Dynamically read the upper/lower bounds from current_params ===
         TMP0_min, TMP0_max = self.state_bounds.TMP0_min, self.state_bounds.global_TMP_hard_limit
@@ -282,7 +282,7 @@ class UFSuperCycleEnv(gym.Env):
 
         return obs
 
-    def _get_action_values(self, action):
+    def get_action_values(self, action):
         """
        Convert the action back to the actual durations
         """
@@ -292,7 +292,7 @@ class UFSuperCycleEnv(gym.Env):
 
     def step(self, action):
         self.current_step += 1
-        L_s, t_bw_s = self._get_action_values(action)
+        L_s, t_bw_s = self.get_action_values(action)
         L_s = np.clip(L_s, self.action_spec.L_min_s, self.action_spec.L_max_s)
         t_bw_s = np.clip(t_bw_s, self.action_spec.t_bw_min_s, self.action_spec.t_bw_max_s)
 
@@ -303,22 +303,31 @@ class UFSuperCycleEnv(gym.Env):
 
         if feasible:
             # Per-step reward
-            reward = self._calculate_reward(info)
+            reward, rec_reward, energy_reward, res_penalty = self._calculate_reward(info)
+            info["rec_reward"] = rec_reward
+            info["energy_reward"] = energy_reward
+            info["res_penalty"] = res_penalty
+
             self.state = next_state
             terminated = False
         else:
             # Penalty for mid-episode failure
             reward = -10
+            info["rec_reward"] = float("nan")   # keep numeric so logging formats like :.4f never crash
+            info["energy_reward"] = float("nan")
+            info["res_penalty"] = float("nan")
             terminated = True
 
         # Check whether the maximum step count has been reached
         truncated = self.current_step >= self.max_episode_steps
 
         self.last_action = (L_s, t_bw_s)
-        next_obs = self._get_obs()
+        next_obs = self.get_obs(self.state)
 
         info["feasible"] = feasible
         info["step"] = self.current_step
+        info["L_s"] = float(L_s)
+        info["t_bw_s"] = float(t_bw_s)
 
         # # ===================== Terminal-reward test: encourage TMP to return near its initial value =====================
         # # Triggered only when the episode ends naturally (full step count without early failure)
@@ -344,36 +353,49 @@ class UFSuperCycleEnv(gym.Env):
 
-    def _calculate_reward(self, info: dict) -> float:
+    def _calculate_reward(self, info: dict) -> tuple:
         """
-        计算强化学习奖励函数
+        计算强化学习奖励函数(扩展版)
 
         功能:
-        - 平衡回收率和残余污染两个目标
+        - 平衡回收率、残余污染和吨水电耗三个目标
         - TMP不直接参与奖励计算(通过失败判定间接影响)
         - 使用 tanh 函数实现平滑的非线性奖励
 
         参数:
-            info (dict): 周期性能指标字典
+            info (dict): 周期性能指标字典,需包含
+                - recovery: 回收率 [0-1]
+                - R_after_ceb: 本周期结束膜阻力
+                - initial_R: 本周期初始膜阻力
+                - delta_R_allow: 本周期允许最大阻力上升
+                - ton_water_energy_kWh_per_m3: 本周期吨水电耗
 
         返回:
-            float: 奖励值(通常在 -2 到 +2 之间)
+            float: 奖励值(通常在 -3 到 +3 之间)
 
         设计思想:
         - 高回收率 → 水资源利用率高 → 正奖励
         - 低残余污染 → 膜长期稳定运行 → 正奖励
-        - 两者需要权衡:过短的过滤时间提高回收率但污染去除不彻底;
-                          过长的过滤时间污染控制好但回收率下降
+        - 低吨水电耗 → 节能 → 正奖励
+        - 三者需要权衡:过短的过滤时间提高回收率但污染去除不彻底;过长时间污染控制好但回收率下降,过高功率增加耗能
 
         参考点设计:
-        - (recovery=0.97, residual_ratio=0.1) → reward ≈ 0(高回收但污染高)
-        - (recovery=0.90, residual_ratio=0.0) → reward ≈ 0(低污染但回收率低)
-        - (recovery≈0.94, residual_ratio≈0.05) → reward > 0(平衡点)
+        - 残余污染:
+            - 高污染参考点 = 1 / self.max_episode_steps
+            - 平衡点 = 0.5 / self.max_episode_steps
+        - 吨水电耗:
+            - 高点 = 0.1034 kWh/m³
+            - 平衡点 = 0.1011 kWh/m³
+            - 低点 = 0.0993 kWh/m³
+        - 回收率参考点保持原有设计
         """
         # ========== Extract performance metrics ==========
         recovery = info["recovery"]  # recovery rate [0-1]

         # Fouling ratio: actual resistance rise / allowed resistance rise
         # Allowed rise = soft upper limit on resistance - current resistance
-        residual_ratio = (info["R_after_ceb"] - info["initial_R"]) / info["delta_R_allow"]
+        residual_ratio = info["residual_ratio"]
+
+        # Specific-energy metric (kWh per m³ of product water)
+        energy = info["ton_water_energy_kWh_per_m3"]

         # ========== Recovery reward term ==========
         # Normalize the recovery rate to [0, 1] (based on the expected range)
@@ -386,22 +408,39 @@ class UFSuperCycleEnv(gym.Env):
         # - k_rec controls the steepness of the curve; larger values make the reward change faster
         rec_reward = np.clip(np.tanh(self.reward_params.k_rec * (rec_norm - 0.5)), -1, 1)
 
-        # ========== Fouling penalty term ==========
-        # Use tanh to build the penalty curve
-        # - residual_ratio < rr0: res_penalty > 0 (rewards low fouling)
-        # - residual_ratio > rr0: res_penalty < 0 (penalizes high fouling)
+        # ========== Residual-fouling penalty term ==========
+        # New reference point: allowed rise ratio per step = 1 / max_episode_steps
+        # Balance point = 0.5 / max_episode_steps
+        ref_residual = 0.5 / self.max_episode_steps
+
+        # Use tanh to build the penalty curve
+        # - residual_ratio < balance point: res_penalty > 0 (rewards low fouling)
+        # - residual_ratio > balance point: res_penalty < 0 (penalizes high fouling)
         # - k_res controls the steepness of the curve
-        res_penalty = -np.tanh(self.reward_params.k_res * (residual_ratio / self.reward_params.rr0 - 1))
+        res_penalty = -np.tanh(self.reward_params.k_res * (residual_ratio / ref_residual - 1))
+
+        # ========== Specific-energy reward term ==========
+        # Low / high reference points, read from UFRewardParams
+        energy_low = self.reward_params.energy_low
+        energy_high = self.reward_params.energy_high
+
+        # Normalize energy to [0, 1]; the balance point corresponds to energy_norm = 0.5
+        energy_norm = (energy - energy_low) / (energy_high - energy_low)
+
+        # Use tanh to build a smooth reward
+        # - energy_norm < 0.5: energy_reward > 0 (energy-saving bonus)
+        # - energy_norm > 0.5: energy_reward < 0 (high-consumption penalty)
+        # - k_energy controls the steepness of the curve
+        energy_reward = -np.tanh(self.reward_params.k_energy * (energy_norm - 0.5))

         # ========== Combine rewards ==========
-        # Simple linear combination of the two terms (weighting is also possible)
-        total_reward = rec_reward + res_penalty
+        # Simple linear combination of the three terms (the fouling term is weighted)
+        total_reward = rec_reward + 2.0 * res_penalty + energy_reward
+

         # Optional: add an offset so a chosen reference point maps to zero reward (currently unused)
         # total_reward -= offset

-        return total_reward
-
-
+        return total_reward, rec_reward, energy_reward, res_penalty
 
 

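For intuition, the three-term combination can be exercised outside the environment. A minimal sketch with the UFRewardParams defaults, assuming max_episode_steps = 45 (the value used by check_dead_initial_state below):

import numpy as np

k_rec, rec_low, rec_high = 5.0, 0.92, 0.99
k_res, k_energy = 10.0, 5.0
energy_low, energy_high = 0.0993, 0.1034
max_episode_steps = 45  # assumed episode length

def reward(recovery, residual_ratio, energy):
    rec_norm = (recovery - rec_low) / (rec_high - rec_low)
    rec_reward = np.clip(np.tanh(k_rec * (rec_norm - 0.5)), -1, 1)
    ref_residual = 0.5 / max_episode_steps              # per-step balance point
    res_penalty = -np.tanh(k_res * (residual_ratio / ref_residual - 1))
    energy_norm = (energy - energy_low) / (energy_high - energy_low)
    energy_reward = -np.tanh(k_energy * (energy_norm - 0.5))
    return rec_reward + 2.0 * res_penalty + energy_reward

# At the balanced operating point every term vanishes:
print(reward(recovery=0.955, residual_ratio=0.5 / 45, energy=0.10135))  # 0.0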
+ 6 - 4
models/uf-rl/uf_train/env/uf_physics.py

@@ -355,6 +355,7 @@ class UFPhysicsModel:
             self.resistance_from_tmp(max_tmp_during_filtration, state.q_UF, state.temp),
             1e-6
         )
+        residual_ratio = (R_after_ceb - initial_R) / delta_R_allow
 
         # ========== Build the performance-metric dictionary ==========
         info = {
@@ -384,6 +385,7 @@ class UFPhysicsModel:
             "R_after_ceb": R_after_ceb,  # CEB后膜阻力
             "max_residual_increase_per_run": max_residual_increase,  # 最大残余污染增量
             "delta_R_allow": delta_R_allow,  # 污染允许增长空间
+            "residual_ratio" : residual_ratio, # 污染上升比例
 
             # 能耗指标
             "ton_water_energy_kWh_per_m3": ton_water_energy,  # 吨水电耗
@@ -394,12 +396,12 @@ class UFPhysicsModel:
         next_state.TMP = tmp_after_ceb
         next_state.R = R_after_ceb
 
-        # ========== Optionally updated parameters (currently kept unchanged) ==========
+        # ========== Optionally updated parameters ==========
         # These can be adjusted dynamically as needed; reserved as an extension point
+        next_state.nuK = state.nuK  # short-term fouling coefficient
         next_state.slope = state.slope  # long-term fouling slope
         next_state.power = state.power  # long-term fouling exponent
         next_state.ceb_removal = state.ceb_removal  # CEB removal capacity
-        next_state.nuK = state.nuK  # short-term fouling coefficient
         next_state.q_UF = state.q_UF  # filtration flow rate
         next_state.temp = state.temp  # water temperature
 
@@ -452,7 +454,7 @@ class UFPhysicsModel:
 
         # Condition 3: the fouling growth ratio exceeds the tolerated range
         residual_increase = (R_after_ceb - R0) / delta_R_allow
-        if residual_increase > 1 / 45:
+        if residual_increase > 1 / 30:
             return False  # failure

         # All conditions passed
@@ -461,7 +463,7 @@ class UFPhysicsModel:
     def check_dead_initial_state(
             self,
             init_state: UFState,
-            max_steps: int = 15,
+            max_steps: int = 45,
             L_s: float = 3800.0,
             t_bw_s: float = 60.0
     ) -> bool:

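One consequence of loosening the per-cycle feasibility threshold from 1/45 to 1/30 while screening initial states over 45 steps: if delta_R_allow is measured from the episode's initial resistance, a policy sitting just under the per-cycle limit could consume up to 1.5× the allowed total rise over a full episode, leaving the TMP hard limit as the binding long-horizon constraint. A one-line check (a sketch; the 45-step episode length is an assumption taken from check_dead_initial_state):

max_episode_steps = 45    # assumed episode length (cf. check_dead_initial_state above)
per_cycle_limit = 1 / 30  # feasibility threshold per super-cycle
print(max_episode_steps * per_cycle_limit)  # 1.5 -> worst case overshoots delta_R_allow by 50%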
+ 186 - 0
models/uf-rl/uf_train/rl_model/DQN/dqn_decider.py

@@ -0,0 +1,186 @@
+"""
+UF 超滤系统 DQN 决策脚本(与当前 DQNTrainer 严格对齐)
+
+功能定位:
+- 加载已训练好的 DQN 模型
+- 构造与训练阶段完全一致的环境
+- 执行单步动作推理(predict)
+- 输出模型建议的工程动作参数(L_s, t_bw_s)
+
+注意:
+- 本脚本【不 step 环境】
+- 不计算 reward
+- 不进行 episode rollout
+"""
+
+from pathlib import Path
+import numpy as np
+
+# ============================================================
+# 1. UF environment and physics model
+# ============================================================
+from uf_train.env.uf_env import UFSuperCycleEnv
+from uf_train.env.uf_physics import UFPhysicsModel
+from uf_train.env.uf_resistance_models_load import load_resistance_models
+from uf_train.env.env_params import (
+    UFState,
+    UFPhysicsParams,
+    UFRewardParams,
+    UFActionSpec,
+    UFStateBounds,
+)
+
+# ============================================================
+# 2. Stable-Baselines3
+# ============================================================
+from stable_baselines3 import DQN
+
+
+# ============================================================
+# 3. DQN decider
+# ============================================================
+class UFDQNDecider:
+    """
+    UF ultrafiltration DQN decider (inference only)
+
+    Design principles:
+    1. Parameter-level consistency with the training environment
+    2. The decision side never advances the environment
+    3. No dependence on the Trainer's internal state
+    """
+
+    def __init__(
+        self,
+        physics,
+        model_path,
+        seed: int = 0,
+    ):
+        """
+        Parameters
+        ----------
+        physics : UFPhysicsModel
+            Physics model, built to match the training configuration
+        model_path
+            Path to dqn_model.zip
+        seed : int
+            Random seed (during inference it mainly affects env.reset)
+        """
+
+        self.action_spec = UFActionSpec()
+        reward_params = UFRewardParams()
+        state_bounds = UFStateBounds()
+
+        self.env = UFSuperCycleEnv(
+            physics=physics,
+            reward_params=reward_params,
+            action_spec=self.action_spec,
+            statebounds=state_bounds,
+            real_state_pool=None,
+            RANDOM_SEED=seed,
+        )
+
+        model_path = Path(model_path)
+        if not model_path.exists():
+            raise FileNotFoundError(f"DQN model not found: {model_path}")
+
+        self.model = DQN.load(
+            path=str(model_path),
+            env=self.env,          # ⚠ an env must be provided
+        )
+
+    # ========================================================
+    # Public decision interface
+    # ========================================================
+    def decide(self, state: UFState | None = None) -> dict:
+        """
+        Single-step decision (does not step the environment)
+
+        Parameters
+        ----------
+        state : UFState | None
+            - None: env.reset() draws an initial state
+            - not None: use the state supplied by an external system
+
+        Returns
+        -------
+        dict
+            {
+                "action_id": int,
+                "L_s": float,
+                "t_bw_s": float,
+            }
+        """
+
+        # ----------------------------------------------------
+        # 4.1 Obtain the observation
+        # ----------------------------------------------------
+        if state is None:
+            obs, _ = self.env.reset()  # gymnasium-style reset returns (obs, info)
+        else:
+            obs = self.env.get_obs(state)  # normalized state used as the policy-network input
+
+        # ----------------------------------------------------
+        # 4.2 DQN inference (deterministic)
+        # ----------------------------------------------------
+        action, _ = self.model.predict(obs, deterministic=True)
+        action_id = int(action)
+
+        # ----------------------------------------------------
+        # 4.3 Decode the action (engineering semantics)
+        # ----------------------------------------------------
+        L_s, t_bw_s = self.env.get_action_values(action_id)
+
+        return {
+            "action_id": action_id,
+            "L_s": L_s,
+            "t_bw_s": t_bw_s,
+        }
+
+
+# ============================================================
+# 5. Example invocation (for debugging)
+# ============================================================
+if __name__ == "__main__":
+
+    # --------------------------------------------------------
+    # Model path (produced by Trainer.save())
+    # --------------------------------------------------------
+    MODEL_PATH = Path(
+        "models/uf-rl/model_result/uf_dqn_tensorboard/xxx/dqn_model.zip"
+    )
+
+    # --------------------------------------------------------
+    # Physics model (must match the training configuration)
+    # --------------------------------------------------------
+    phys_params = UFPhysicsParams()
+    res_fp, res_bw = load_resistance_models(phys_params)
+    physics = UFPhysicsModel(
+        phys_params=phys_params,
+        resistance_model_fp=res_fp,
+        resistance_model_bw=res_bw,
+    )
+
+    # --------------------------------------------------------
+    # Initialize the decider
+    # --------------------------------------------------------
+    decider = UFDQNDecider(
+        physics=physics,
+        model_path=MODEL_PATH,
+    )
+
+    # --------------------------------------------------------
+    # 执行一次决策
+    # --------------------------------------------------------
+    decision = decider.decide()
+
+    print("===== DQN 决策结果 =====")
+    print(f"Action ID : {decision['action_id']}")
+    print(f"L_s       : {decision['L_s']} s")
+    print(f"t_bw_s    : {decision['t_bw_s']} s")

BIN
models/uf-rl/uf_train/rl_model/DQN/dqn_model.zip


+ 20 - 4
models/uf-rl/uf_train/rl_model/DQN/dqn_trainer.py

@@ -14,7 +14,7 @@ class DQNTrainer:
     - Evaluate the policy on the test-set environment
     """
 
-    def __init__(self, env, params, callback=None):
+    def __init__(self, env, params, callback=None, PROJECT_ROOT=None):
         """
         初始化训练器
 
@@ -26,25 +26,41 @@ class DQNTrainer:
         self.env = env
         self.params = params
         self.callback = callback
+        self.PROJECT_ROOT = PROJECT_ROOT
         self.log_dir = self._create_log_dir()  # create the TensorBoard log directory
         self.model = self._create_model()      # create the DQN model
 
     # ------------------- Private methods -------------------
     def _create_log_dir(self):
         """
-        Create the TensorBoard log directory
+        Create the TensorBoard log directory (fixed under PROJECT_ROOT/model_result/uf_dqn_tensorboard)
         """
+        # 1️⃣ Timestamp, used to distinguish individual training runs
         timestamp = time.strftime("%Y%m%d-%H%M%S")
+
+        # 2️⃣ Convert float hyperparameters to integers for naming
         lr_int = int(self.params.learning_rate * 1e4)
         gamma_int = int(self.params.gamma * 100)
         exp_int = int(self.params.exploration_fraction * 100)
 
-        log_name = f"DQN_lr{lr_int}_buf{self.params.buffer_size}_bs{self.params.batch_size}_gamma{gamma_int}_exp{exp_int}_{self.params.remark}_{timestamp}"
+        # 3️⃣ Build the log-directory name
+        log_name = (
+            f"DQN_lr{lr_int}_buf{self.params.buffer_size}_bs{self.params.batch_size}"
+            f"_gamma{gamma_int}_exp{exp_int}_{self.params.remark}_{timestamp}"
+        )
 
-        base_dir = os.path.join(os.getcwd(), "uf_dqn_tensorboard")
+        # 4️⃣ Fixed log location: PROJECT_ROOT/model_result/uf_dqn_tensorboard
+        # Assumes run_dqn_train.py defines PROJECT_ROOT = "models/uf-rl"
+        base_dir = os.path.join(self.PROJECT_ROOT, "model_result", "uf_dqn_tensorboard")
         os.makedirs(base_dir, exist_ok=True)
+
+        # 5️⃣ Full log-directory path
         log_dir = os.path.join(base_dir, log_name)
         os.makedirs(log_dir, exist_ok=True)
+
         return log_dir
 
     def _create_model(self):

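For reference, a sketch of the resulting directory name under assumed hyperparameters (learning_rate=1e-3, gamma=0.99, exploration_fraction=0.1, buffer_size=100000, batch_size=64, remark="base"). One caution: int() truncates, and float products can land just below an integer, so int(round(x)) is the safer encoding:

import time

learning_rate, gamma, exploration_fraction = 1e-3, 0.99, 0.1
buffer_size, batch_size, remark = 100_000, 64, "base"

lr_int = int(round(learning_rate * 1e4))          # 10
gamma_int = int(round(gamma * 100))               # 99
exp_int = int(round(exploration_fraction * 100))  # 10
timestamp = time.strftime("%Y%m%d-%H%M%S")

log_name = (
    f"DQN_lr{lr_int}_buf{buffer_size}_bs{batch_size}"
    f"_gamma{gamma_int}_exp{exp_int}_{remark}_{timestamp}"
)
print(log_name)  # DQN_lr10_buf100000_bs64_gamma99_exp10_base_<timestamp>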
BIN
models/uf-rl/uf_train/rl_model/DQN/loss_test.png


BIN
models/uf-rl/uf_train/rl_model/DQN/model/dqn_model.zip


BIN
models/uf-rl/uf_train/rl_model/DQN/model/loss.png


BIN
models/uf-rl/uf_train/rl_model/DQN/reward_test.png


+ 251 - 0
models/uf-rl/uf_train/rl_model/DQN/run_dqn_decide.py

@@ -0,0 +1,251 @@
+"""
+run_dqn_decide.py
+
+Main entry point for UF ultrafiltration DQN decisions (inference / online assist)
+
+Responsibilities:
+1. Build the physical world (physics)
+2. Instantiate the decider (UFDQNDecider)
+3. Build the current plant state (observation)
+4. Ask the model for a policy suggestion
+5. Generate the PLC dispatch command (clamped / rate-limited)
+6. Evaluate the command under the physics model (evaluation only, nothing is dispatched)
+"""
+
+from pathlib import Path
+import numpy as np
+
+# ========== Parameters / physics ==========
+from uf_train.env.uf_resistance_models_load import load_resistance_models
+from uf_train.env.uf_physics import UFPhysicsModel
+from uf_train.env.env_params import UFState, UFPhysicsParams, UFStateBounds, UFRewardParams, UFActionSpec
+
+
+# ========== Decider ==========
+from uf_train.rl_model.DQN.dqn_decider import UFDQNDecider
+
+
+def build_physics():
+    """
+    Build a physics model consistent with the training setup (done once)
+    """
+    phys_params = UFPhysicsParams()
+    res_fp, res_bw = load_resistance_models(phys_params)
+
+    physics = UFPhysicsModel(
+        phys_params=phys_params,
+        resistance_model_fp=res_fp,
+        resistance_model_bw=res_bw,
+    )
+    return physics
+
+def generate_plc_instructions(current_L_s, current_t_bw_s, model_prev_L_s, model_prev_t_bw_s, model_L_s, model_t_bw_s):
+    """
+    Generate the PLC command from the plant's current values, the model's previous-round
+    decision values, and the model's current-round decision values.
+
+    Added behavior:
+    1. None handling: if the model's previous-round value is None, fall back to the plant's
+       current value; if the plant's current value is also None, return None and report an error.
+    """
+
+    action_spec = UFActionSpec()
+    adjustment_threshold = 1.0
+
+    # Handle None values
+    if model_prev_L_s is None:
+        if current_L_s is None:
+            print("Error: both the plant's current filtration duration and the model's previous value are None")
+            return None, None
+        else:
+            # Use the plant's current value as the baseline
+            effective_current_L = current_L_s
+            source_L = "plant current value (model's previous value is None)"
+    else:
+        # The model's previous value is not None; check the plant's current value next
+        if current_L_s is None:
+            effective_current_L = model_prev_L_s
+            source_L = "model's previous value (plant's current value is None)"
+        else:
+            effective_current_L = model_prev_L_s
+            source_L = "model's previous value"
+
+    # Apply the same handling to the backwash duration
+    if model_prev_t_bw_s is None:
+        if current_t_bw_s is None:
+            print("Error: both the plant's current backwash duration and the model's previous value are None")
+            return None, None
+        else:
+            effective_current_t_bw = current_t_bw_s
+            source_t_bw = "plant current value (model's previous value is None)"
+    else:
+        if current_t_bw_s is None:
+            effective_current_t_bw = model_prev_t_bw_s
+            source_t_bw = "model's previous value (plant's current value is None)"
+        else:
+            effective_current_t_bw = model_prev_t_bw_s
+            source_t_bw = "model's previous value"
+
+    # Check that all inputs are within the allowed ranges (non-None values only)
+    # Plant current values (warnings)
+    if current_L_s is not None and not (action_spec.L_min_s <= current_L_s <= action_spec.L_max_s):
+        print(f"Warning: current filtration duration {current_L_s} s is outside the allowed range [{action_spec.L_min_s}, {action_spec.L_max_s}]")
+    if current_t_bw_s is not None and not (action_spec.t_bw_min_s <= current_t_bw_s <= action_spec.t_bw_max_s):
+        print(f"Warning: current backwash duration {current_t_bw_s} s is outside the allowed range [{action_spec.t_bw_min_s}, {action_spec.t_bw_max_s}]")
+
+    # Model previous-round values (warnings)
+    if model_prev_L_s is not None and not (action_spec.L_min_s <= model_prev_L_s <= action_spec.L_max_s):
+        print(f"Warning: the model's previous filtration duration {model_prev_L_s} s is outside the allowed range [{action_spec.L_min_s}, {action_spec.L_max_s}]")
+    if model_prev_t_bw_s is not None and not (action_spec.t_bw_min_s <= model_prev_t_bw_s <= action_spec.t_bw_max_s):
+        print(f"Warning: the model's previous backwash duration {model_prev_t_bw_s} s is outside the allowed range [{action_spec.t_bw_min_s}, {action_spec.t_bw_max_s}]")
+
+    # Model current-round values (errors)
+    if model_L_s is None:
+        raise ValueError("Error: the filtration duration suggested by the decision model must not be None")
+    elif not (action_spec.L_min_s <= model_L_s <= action_spec.L_max_s):
+        raise ValueError(f"Error: the suggested filtration duration {model_L_s} s is outside the allowed range [{action_spec.L_min_s}, {action_spec.L_max_s}]")
+
+    if model_t_bw_s is None:
+        raise ValueError("Error: the backwash duration suggested by the decision model must not be None")
+    elif not (action_spec.t_bw_min_s <= model_t_bw_s <= action_spec.t_bw_max_s):
+        raise ValueError(f"Error: the suggested backwash duration {model_t_bw_s} s is outside the allowed range [{action_spec.t_bw_min_s}, {action_spec.t_bw_max_s}]")
+
+    print(f"Filtration-duration baseline: {source_L}, value: {effective_current_L}")
+    print(f"Backwash-duration baseline: {source_t_bw}, value: {effective_current_t_bw}")
+
+    # Compute the adjustment from the selected baseline value
+    L_diff = model_L_s - effective_current_L
+    L_adjustment = 0
+    if abs(L_diff) >= adjustment_threshold * action_spec.L_step_s:
+        if L_diff >= 0:
+            L_adjustment = action_spec.L_step_s
+        else:
+            L_adjustment = -action_spec.L_step_s
+    next_L_s = effective_current_L + L_adjustment
+
+    t_bw_diff = model_t_bw_s - effective_current_t_bw
+    t_bw_adjustment = 0
+    if abs(t_bw_diff) >= adjustment_threshold * action_spec.t_bw_step_s:
+        if t_bw_diff >= 0:
+            t_bw_adjustment = action_spec.t_bw_step_s
+        else:
+            t_bw_adjustment = -action_spec.t_bw_step_s
+    next_t_bw_s = effective_current_t_bw + t_bw_adjustment
+
+    return next_L_s, next_t_bw_s
+
+
+
+def calc_uf_cycle_metrics(current_state, max_tmp_during_filtration, min_tmp_during_filtration, L_s: float, t_bw_s: float):
+    """
+    计算 UF 超滤系统的核心性能指标
+
+    参数:
+
+        L_s (float): 单次过滤时间(秒)
+        t_bw_s (float): 单次反洗时间(秒)
+
+    返回:
+        dict: {
+            "k_bw_per_ceb": 小周期次数,
+            "ton_water_energy_kWh_per_m3": 吨水电耗,
+            "recovery": 回收率,
+            "net_delivery_rate_m3ph": 净供水率 (m³/h),
+            "daily_prod_time_h": 日均产水时间 (小时/天)
+            "max_permeability": 全周期最高渗透率(lmh/bar)
+        }
+    """
+
+    # Simulate a super-cycle under these parameters (uses the module-level `physics` built in __main__)
+    info, next_state = physics.simulate_one_supercycle(current_state, L_s=L_s, t_bw_s=t_bw_s)
+
+    # Read the simulated-cycle metrics
+    k_bw_per_ceb = info["k_bw_per_ceb"]
+    ton_water_energy_kWh_per_m3 = info["ton_water_energy_kWh_per_m3"]
+    recovery = info["recovery"]
+    daily_prod_time_h = info["daily_prod_time_h"]
+
+    # Highest / lowest TMP over the cycle: prefer plant measurements, fall back to the simulated extremes
+    if max_tmp_during_filtration is None:
+        max_tmp_during_filtration = info["max_TMP_during_filtration"]
+    if min_tmp_during_filtration is None:
+        min_tmp_during_filtration = info["min_TMP_during_filtration"]
+
+    # Peak permeability (LMH/bar): flux [LMH] = q_UF * 1000 / membrane area (128*40 m² here);
+    # dividing by TMP * 10 converts MPa to bar, which combines into the leading factor 100
+    max_permeability = 100 * current_state.q_UF / (128*40) / min_tmp_during_filtration
+
+    return {
+        "k_bw_per_ceb": k_bw_per_ceb,
+        "ton_water_energy_kWh_per_m3": ton_water_energy_kWh_per_m3,
+        "recovery": recovery,
+        "daily_prod_time_h": daily_prod_time_h,
+        "max_permeability": max_permeability
+    }
+
+def run_dqn_decide(
+    model_path: Path,
+    physics,
+    # -------- plant current state --------
+    current_state: UFState
+):
+    """
+    Single-round DQN decision flow
+    """
+
+    # Build the decider
+    decider = UFDQNDecider(
+        physics=physics,
+        model_path=model_path,
+        seed=0,
+    )
+    # Model decision (does not advance the real environment)
+    decision = decider.decide(current_state)
+    action_id = decision["action_id"]
+    model_L_s = decision["L_s"]
+    model_t_bw_s = decision["t_bw_s"]
+
+    return action_id, model_L_s, model_t_bw_s
+
+
+# ==============================
+# Example invocation
+# ==============================
+if __name__ == "__main__":
+
+    MODEL_PATH = Path("model/dqn_model.zip")
+    TMP0 = 0.03   # initial TMP0
+    q_UF = 300    # feed flow rate (m³/h)
+    temp = 20.0   # feed water temperature (°C)
+    current_state = UFState(TMP=TMP0, q_UF=q_UF, temp=temp)
+
+    physics = build_physics()
+
+    action_id, model_L_s, model_t_bw_s = run_dqn_decide(
+        model_path=MODEL_PATH,
+        physics=physics,
+        current_state=current_state,
+    )  # environment construction and model loading live inside the UFDQNDecider class
+
+    current_L_s = 3800
+    current_t_bw_s = 40
+    model_prev_L_s = 4040
+    model_prev_t_bw_s = 60
+    L_s, t_bw_s = generate_plc_instructions(current_L_s, current_t_bw_s, model_prev_L_s, model_prev_t_bw_s, model_L_s,
+                                            model_t_bw_s)  # obtain the command to dispatch
+
+    L_s = 4100
+    t_bw_s = 96
+    # New plant-data interface: cycle max/min TMP; pass None when no plant data is connected,
+    # and calc_uf_cycle_metrics() falls back to the simulated cycle's TMP extremes
+    max_tmp_during_filtration = 0.050176
+    min_tmp_during_filtration = 0.012496
+    execution_result = calc_uf_cycle_metrics(current_state, max_tmp_during_filtration, min_tmp_during_filtration, L_s, t_bw_s)
+    print("\n===== 单步决策结果 =====")
+    print(f"模型选择的动作: {action_id}")
+    print(f"模型选择的L_s: {model_L_s} 秒, 模型选择的t_bw_s: {model_t_bw_s} 秒")
+    print(f"指令下发的L_s: {L_s} 秒, 指令下发的t_bw_s: {t_bw_s} 秒")
+    print(f"指令对应的反洗次数: {execution_result['k_bw_per_ceb']}")
+    print(f"指令对应的吨水电耗: {execution_result['ton_water_energy_kWh_per_m3']}")
+    print(f"指令对应的回收率: {execution_result['recovery']}")
+    print(f"指令对应的日均产水时间: {execution_result['daily_prod_time_h']}")
+    print(f"指令对应的最高渗透率: {execution_result['max_permeability']}")
+

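The PLC generator moves the setpoint toward the model's suggestion by at most one action step per cycle. A sketch of that convergence with an assumed step size (the real value lives in UFActionSpec; L_step_s = 60 s here is hypothetical):

# Hypothetical step size; the real value comes from UFActionSpec.L_step_s
L_step_s = 60.0

def rate_limit(current: float, target: float, step: float) -> float:
    # One bounded move per call, mirroring generate_plc_instructions
    diff = target - current
    if abs(diff) >= step:  # adjustment_threshold = 1.0 step
        return current + (step if diff >= 0 else -step)
    return current

L = 3800.0
for _ in range(6):
    L = rate_limit(L, 4100.0, L_step_s)
    print(L)  # 3860, 3920, 3980, 4040, 4100, 4100 -> holds once the target is reached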
+ 2 - 1
models/uf-rl/uf_train/rl_model/DQN/run_dqn_train.py

@@ -161,6 +161,7 @@ def main():
         env=train_env,
         params=dqn_params,
         callback=callback,
+        PROJECT_ROOT=PROJECT_ROOT
     )
 
 
@@ -208,7 +209,7 @@ if __name__ == "__main__":
     # 2. 全局配置
     # ============================================================
     RANDOM_SEED = 2025
-    TOTAL_TIMESTEPS = 1000000
+    TOTAL_TIMESTEPS = 1500000
 
     RESET_STATE_CSV = (
             PROJECT_ROOT