# -*- coding: utf-8 -*-
"""
rl_tracing.py: Reinforcement-learning link-level anomaly root-cause tracing.

Actor-Critic architecture trained with PPO (Proximal Policy Optimization).
Combines behavior cloning (imitation learning) of expert experience with
autonomous exploration (reinforcement learning) to walk upstream from a
"trigger variable" and locate the "root-cause variable".
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
from config import config


# ----------------- 1. Environment -----------------
class CausalTracingEnv:
    """
    Reinforcement-learning interaction environment.
    Defines the agent's state, action space, and reward function.
    """
    def __init__(self, causal_graph, window_scores, threshold_df, expert_knowledge=None):
        self.sensor_list = causal_graph['sensor_list']
        self.map = causal_graph['sensor_to_idx']
        self.idx_to_sensor = {v: k for k, v in self.map.items()}
        self.adj = causal_graph['adj_matrix']
        self.scores = window_scores
        # Knowledge base of anomaly chains recorded historically by experts
        self.expert_knowledge = expert_knowledge if expert_knowledge else {}
        self.num_sensors = len(self.sensor_list)
        # Parse each sensor's layer and owning device; used to mask illegal actions
        self.node_props = {}
        col_one_layer = self._find_col(threshold_df, config.KEYWORD_LAYER)
        col_device = self._find_col(threshold_df, config.KEYWORD_DEVICE)
        df_indexed = threshold_df.set_index('ID')
        dict_one = df_indexed[col_one_layer].to_dict() if col_one_layer else {}
        dict_dev = df_indexed[col_device].to_dict() if col_device else {}
        for name, idx in self.map.items():
            l_val = dict_one.get(name, -1)
            try:
                l_val = int(l_val)
            except (ValueError, TypeError):
                l_val = 0
            d_val = dict_dev.get(name, None)
            d_val = str(d_val).strip() if pd.notna(d_val) and str(d_val).strip() != '' else None
            self.node_props[idx] = {'one_layer': l_val, 'device': d_val}
        # Per-episode state variables
        self.current_window_idx = 0
        self.current_node_idx = 0
        self.prev_node_idx = 0
        self.trigger_node_idx = 0
        self.path = []
        self.current_expert_paths = []
        self.target_roots = set()

    def _find_col(self, df, keyword):
        if keyword in df.columns:
            return keyword
        for c in df.columns:
            if c.lower() == keyword.lower():
                return c
        return None

    def reset(self, force_window_idx=None, force_trigger=None):
        """
        Reset the environment and start a new episode.
        Randomly picks an anomalous time window and an alarming trigger
        sensor as the starting point.
        """
        if force_window_idx is not None:
            self.current_window_idx = force_window_idx
            t_name = force_trigger
        else:
            found = False
            # Randomly search for a window in which a trigger variable is anomalous
            for _ in range(100):
                w_idx = np.random.randint(len(self.scores))
                win_scores = self.scores[w_idx]
                candidates = []
                for t_name in config.TRIGGER_SENSORS:
                    if t_name in self.map:
                        idx = self.map[t_name]
                        # A trigger variable is a candidate start only if its
                        # score exceeds the trigger threshold
                        if win_scores[idx] > config.TRIGGER_SCORE_THRESH:
                            candidates.append(t_name)
                if candidates:
                    self.current_window_idx = w_idx
                    t_name = np.random.choice(candidates)
                    found = True
                    break
            if not found:
                self.current_window_idx = np.random.randint(len(self.scores))
                t_name = list(self.map.keys())[0]
        # Initialize path state
        self.current_node_idx = self.map.get(t_name, 0)
        self.trigger_node_idx = self.current_node_idx
        self.prev_node_idx = self.current_node_idx
        self.path = [self.current_node_idx]
        # Load the matching expert knowledge as this episode's targets (used for rewards)
        self.target_roots = set()
        self.current_expert_paths = []
        if self.current_node_idx in self.expert_knowledge:
            entry = self.expert_knowledge[self.current_node_idx]
            self.target_roots = entry['roots']
            self.current_expert_paths = entry['paths']
        return self._get_state()

    def _get_state(self):
        """
        Build the current observation.
        Packs discrete node IDs together with continuous anomaly-score and
        layer features for the network to consume.
        """
        curr_score = self.scores[self.current_window_idx, self.current_node_idx]
        prev_score = self.scores[self.current_window_idx, self.prev_node_idx]
        curr_layer = self.node_props[self.current_node_idx]['one_layer'] / 20.0
        return (
            torch.LongTensor([self.current_node_idx, self.prev_node_idx, self.trigger_node_idx]),
            torch.FloatTensor([curr_score, prev_score, curr_layer])
        )

    def get_valid_actions(self, curr_idx):
        """
        Action-masking mechanism.
        Based on the causal graph and business rules, returns the neighbor
        nodes the agent may move to from the current node.
        """
        # Physically adjacent nodes from the adjacency matrix
        neighbors = np.where(self.adj[curr_idx] == 1)[0]
        curr_props = self.node_props[curr_idx]
        curr_l, curr_d = curr_props['one_layer'], curr_props['device']
        valid = []
        for n in neighbors:
            if n in self.path:
                continue
            tgt_props = self.node_props[n]
            tgt_l, tgt_d = tgt_props['one_layer'], tgt_props['device']
            # Double-check the layer and device constraints:
            # only same-layer moves or moves to the adjacent lower layer are allowed
            if curr_l != 0 and tgt_l != 0:
                if not ((tgt_l == curr_l) or (tgt_l == curr_l - 1)):
                    continue
            if (curr_d is not None) and (tgt_d is not None):
                if curr_d != tgt_d:
                    continue
            valid.append(n)
        return np.array(valid)

    def step(self, action_idx):
        """
        Execute one action; the environment returns the next state and reward.
        The reward function encodes the agent's value system and thus its
        behavioral tendencies.
        """
        prev = self.current_node_idx
        self.prev_node_idx = prev
        self.current_node_idx = action_idx
        self.path.append(action_idx)
        score_curr = self.scores[self.current_window_idx, self.current_node_idx]
        reward = 0.0
        done = False
        # [Reward 1: imitate expert experience]
        # Positive feedback for stepping onto a node recorded in historical chains
        in_expert_nodes = False
        for e_path in self.current_expert_paths:
            if action_idx in e_path:
                in_expert_nodes = True
                break
        if in_expert_nodes:
            reward += 2.0
        else:
            reward -= 0.2  # mild penalty for unknown nodes, to discourage aimless wandering
        # [Reward 2: hit the final root cause]
        # Finding the true culprit yields a large reward and ends the episode
        if action_idx in self.target_roots:
            reward += 10.0
            done = True
        # [Reward 3: anomaly-gradient guidance]
        # Encourage moving toward rising anomaly scores (anomaly propagation decays downstream)
        score_prev = self.scores[self.current_window_idx, prev]
        diff = score_curr - score_prev
        if diff > 0:
            reward += diff * 3.0
        else:
            reward -= 0.5
        # [Penalty 1: path too long] prevents divergence
        if len(self.path) >= config.MAX_PATH_LENGTH:
            done = True
            if action_idx not in self.target_roots:
                reward -= 5.0
        # [Penalty 2: wandering into normal territory]
        # A very low anomaly score means the search went the wrong way
        if score_curr < 0.15 and len(self.path) > 3:
            done = True
            reward -= 2.0
        return self._get_state(), reward, done, {}
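

# Minimal interaction sketch for CausalTracingEnv (hedged; `causal_graph`,
# `window_scores`, and `threshold_df` are assumed to be produced by the
# upstream pipeline and are not defined in this file):
#   env = CausalTracingEnv(causal_graph, window_scores, threshold_df)
#   state = env.reset()                      # pick anomalous window + trigger
#   valid = env.get_valid_actions(env.current_node_idx)
#   if len(valid) > 0:
#       state, reward, done, _ = env.step(int(np.random.choice(valid)))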


# ----------------- 2. Network architecture (Actor-Critic) -----------------
class TargetDrivenActorCritic(nn.Module):
    """
    The agent's "brain": a two-headed Actor-Critic architecture.
    The Actor decides "where to go next" (policy); the Critic estimates
    "how good the current situation is" (value).
    """
    def __init__(self, num_sensors, embedding_dim=64, hidden_dim=256):
        super().__init__()
        self.node_emb = nn.Embedding(num_sensors, embedding_dim)
        # Three node embeddings (current, previous, trigger) + three scalar features
        input_dim = (embedding_dim * 3) + 3
        self.shared_net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.LayerNorm(hidden_dim),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )
        self.actor = nn.Linear(hidden_dim, num_sensors)
        self.critic = nn.Linear(hidden_dim, 1)

    def forward(self, int_data, float_data):
        curr_emb = self.node_emb(int_data[:, 0])
        prev_emb = self.node_emb(int_data[:, 1])
        trig_emb = self.node_emb(int_data[:, 2])
        x = torch.cat([curr_emb, prev_emb, trig_emb, float_data], dim=1)
        feat = self.shared_net(x)
        return self.actor(feat), self.critic(feat)
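

# Tensor-shape sketch for TargetDrivenActorCritic.forward (a hedged note;
# B is an illustrative batch size, not a name from the original code):
#   int_data:   LongTensor  [B, 3] -> (current, previous, trigger) node ids
#   float_data: FloatTensor [B, 3] -> (current score, previous score, layer/20)
#   returns:    logits [B, num_sensors] from the Actor, value [B, 1] from the Critic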


# ----------------- 3. Trainer -----------------
class RLTrainer:
    def __init__(self, causal_graph, train_scores, threshold_df):
        self.sensor_map = causal_graph['sensor_to_idx']
        self.idx_to_sensor = {v: k for k, v in self.sensor_map.items()}
        self.threshold_df = threshold_df
        self.causal_graph = causal_graph
        self.expert_knowledge, self.bc_samples, _ = self._load_expert_data()
        self.env = CausalTracingEnv(causal_graph, train_scores, threshold_df, self.expert_knowledge)
        self.model = TargetDrivenActorCritic(self.env.num_sensors, config.EMBEDDING_DIM, config.HIDDEN_DIM)
        self.optimizer = optim.Adam(self.model.parameters(), lr=config.PPO_LR)

    def _load_expert_data(self):
        path = config.ABNORMAL_LINK_FILENAME
        kb_data = {}
        bc_data = []
        if not os.path.exists(path):
            return kb_data, bc_data, None
        df = pd.read_excel(path)
        for _, row in df.iterrows():
            link = row.get('Link Path', '')
            if pd.isna(link) or not str(link).strip():
                continue
            # Normalize arrows, then reverse so the path runs trigger -> root
            nodes_str = [n.strip() for n in str(link).replace('→', '->').split('->')]
            path_nodes = nodes_str[::-1]
            ids = []
            valid = True
            for n in path_nodes:
                if n in self.sensor_map:
                    ids.append(self.sensor_map[n])
                else:
                    valid = False
                    break
            if not valid or len(ids) < 2:
                continue
            trigger_id = ids[0]
            root_id = ids[-1]
            if trigger_id not in kb_data:
                kb_data[trigger_id] = {'paths': [], 'roots': set(), 'logic': row.get('Process Logic Basis', '')}
            kb_data[trigger_id]['paths'].append(ids)
            kb_data[trigger_id]['roots'].add(root_id)
            # Turn each transition into a behavior-cloning sample:
            # state (current, previous, trigger) -> label (next node)
            for i in range(len(ids) - 1):
                curr = ids[i]
                prev = ids[max(0, i - 1)]
                nxt = ids[i + 1]
                bc_data.append(((curr, prev, trigger_id), nxt))
        return kb_data, bc_data, df
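
    # Illustrative shape of one knowledge-base entry built above (hypothetical
    # ids, not taken from the original data):
    #   kb_data[trigger_id] == {
    #       'paths': [[7, 3, 1]],   # node-id chains, ordered trigger -> root
    #       'roots': {1},
    #       'logic': 'process-mechanism text from the Excel sheet',
    #   }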

    def pretrain_bc(self):
        """
        Stage 1: behavior-cloning (BC) pre-training.
        Drills the agent on the expert-known anomaly chains so it starts out
        with basic domain knowledge, using a standard supervised
        cross-entropy loss.
        """
        if not self.bc_samples:
            return
        print(f"\n>>> [Step 3.1] Starting BC pre-training ({config.BC_EPOCHS} epochs)...")
        states_int = torch.LongTensor([list(s) for s, a in self.bc_samples])
        actions = torch.LongTensor([a for s, a in self.bc_samples])
        # Synthetic continuous features: expert paths are assumed to lie in
        # high-anomaly regions (current 0.9, previous 0.8; layer feature left at 0)
        states_float = torch.zeros((len(states_int), 3))
        states_float[:, 0] = 0.9
        states_float[:, 1] = 0.8
        loss_fn = nn.CrossEntropyLoss()
        pbar = tqdm(range(config.BC_EPOCHS), desc="BC Training")
        for epoch in pbar:
            logits, _ = self.model(states_int, states_float)
            loss = loss_fn(logits, actions)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            if epoch % 100 == 0:
                pbar.set_postfix({'Loss': f"{loss.item():.4f}"})

    def train_ppo(self):
        """
        Stage 2: PPO reinforcement-learning exploration.
        The agent trial-and-errors through real data windows with differing
        anomaly-score distributions, discovering potential anomaly chains not
        yet registered in the expert knowledge base.
        """
        print(f"\n>>> [Step 3.2] Starting PPO training ({config.RL_EPISODES} episodes)...")
        pbar = tqdm(range(config.RL_EPISODES), desc="PPO Training")
        rewards_hist = []
        for _ in pbar:
            state_data = self.env.reset()
            done = False
            ep_r = 0
            b_int, b_float, b_act, b_lp, b_rew, b_mask = [], [], [], [], [], []
            b_amask = []  # per-step action masks, reused when re-evaluating the policy
            while not done:
                s_int = state_data[0].unsqueeze(0)
                s_float = state_data[1].unsqueeze(0)
                valid = self.env.get_valid_actions(s_int[0, 0].item())
                if len(valid) == 0:
                    break
                logits, _ = self.model(s_int, s_float)
                mask = torch.full_like(logits, -1e9)
                mask[0, valid] = 0
                dist = Categorical(F.softmax(logits + mask, dim=-1))
                action = dist.sample()
                next_s, r, done, _ = self.env.step(action.item())
                b_int.append(s_int); b_float.append(s_float)
                b_act.append(action); b_lp.append(dist.log_prob(action))
                b_rew.append(r); b_mask.append(1 - done)
                b_amask.append(mask)
                state_data = next_s
                ep_r += r
            if len(b_rew) > 1:
                self._update_ppo(b_int, b_float, b_act, b_lp, b_rew, b_mask, b_amask)
            rewards_hist.append(ep_r)
            if len(rewards_hist) > 50:
                rewards_hist.pop(0)
            pbar.set_postfix({'AvgR': f"{np.mean(rewards_hist):.2f}"})

    def _update_ppo(self, b_int, b_float, b_act, b_lp, b_rew, b_mask, b_amask):
        """
        Core PPO update: discounted returns, an advantage estimate (here the
        normalized returns), and the clipped surrogate objective that bounds
        how far each policy update can move.
        """
        # Discounted returns, computed backwards; b_mask zeroes R at episode ends
        returns = []
        R = 0
        for r, m in zip(reversed(b_rew), reversed(b_mask)):
            R = r + config.PPO_GAMMA * R * m
            returns.insert(0, R)
        returns = torch.tensor(returns)
        if returns.numel() > 1 and returns.std() > 1e-5:
            returns = (returns - returns.mean()) / (returns.std() + 1e-5)
        elif returns.numel() > 1:
            returns = returns - returns.mean()
        s_int = torch.cat(b_int)
        s_float = torch.cat(b_float)
        act = torch.cat(b_act)             # shape [T], matching the batched dist
        old_lp = torch.cat(b_lp).detach()  # shape [T]
        a_mask = torch.cat(b_amask)
        for _ in range(config.PPO_K_EPOCHS):
            logits, vals = self.model(s_int, s_float)
            # Re-apply the stored action masks so new and old log-probs are
            # taken over the same valid-action distribution
            dist = Categorical(logits=logits + a_mask)
            new_lp = dist.log_prob(act)
            ratio = torch.exp(new_lp - old_lp)
            surr1 = ratio * returns
            surr2 = torch.clamp(ratio, 1 - config.PPO_EPS_CLIP, 1 + config.PPO_EPS_CLIP) * returns
            v_pred = vals.squeeze()
            if v_pred.shape != returns.shape:
                v_pred = v_pred.view(-1)
                returns = returns.view(-1)
            loss = -torch.min(surr1, surr2).mean() + 0.5 * F.mse_loss(v_pred, returns)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
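
    # For reference, the clipped objective implemented above (standard PPO;
    # here the normalized discounted return R_t stands in for the advantage
    # estimate A_t, a simplification this trainer makes):
    #   r_t(theta) = exp(log pi_theta(a_t|s_t) - log pi_theta_old(a_t|s_t))
    #   L_CLIP     = E_t[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
    #   loss       = -L_CLIP + 0.5 * MSE(V(s_t), R_t),  eps = config.PPO_EPS_CLIP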

    def evaluate(self, test_scores):
        """
        Step 4: model validation and evaluation.
        Runs the full pipeline on unseen test data to measure diagnostic
        accuracy and the ability to discover new patterns, then exports a
        structured Excel evaluation report.
        """
        print("\n>>> [Step 4] Evaluating on the test set...")
        self.model.eval()
        results = []
        cnt_detected = 0
        cnt_kb_covered = 0
        cnt_path_match = 0
        cnt_root_match = 0
        cnt_new = 0
        env = CausalTracingEnv(self.causal_graph, test_scores, self.threshold_df, self.expert_knowledge)
        for win_idx in range(len(test_scores)):
            scores = test_scores[win_idx]
            active = []
            for t_name in config.TRIGGER_SENSORS:
                if t_name in self.sensor_map:
                    idx = self.sensor_map[t_name]
                    if scores[idx] > config.TRIGGER_SCORE_THRESH:
                        active.append((t_name, idx))
            for t_name, t_idx in active:
                cnt_detected += 1
                state_data = env.reset(force_window_idx=win_idx, force_trigger=t_name)
                path_idxs = [t_idx]
                done = False
                # Greedy rollout: always take the highest-logit valid action
                while not done:
                    s_int = state_data[0].unsqueeze(0)
                    s_float = state_data[1].unsqueeze(0)
                    valid = env.get_valid_actions(path_idxs[-1])
                    if len(valid) == 0:
                        break
                    logits, _ = self.model(s_int, s_float)
                    mask = torch.full_like(logits, -1e9)
                    mask[0, valid] = 0
                    act = torch.argmax(logits + mask, dim=1).item()
                    state_data, _, done, _ = env.step(act)
                    path_idxs.append(act)
                    if len(path_idxs) >= config.MAX_PATH_LENGTH:
                        done = True
                path_names = [self.idx_to_sensor[i] for i in path_idxs]
                root = path_names[-1]
                root_score = scores[self.sensor_map[root]]
                match_status = "Undefined"
                logic = ""
                if t_idx in self.expert_knowledge:
                    cnt_kb_covered += 1
                    entry = self.expert_knowledge[t_idx]
                    logic = entry.get('logic', '')
                    real_roots = [self.idx_to_sensor[r] for r in entry['roots']]
                    # Root match: any node on the predicted path is a known root
                    rm = False
                    for p_node in path_names:
                        if p_node in real_roots:
                            rm = True
                            break
                    # Path match: Jaccard similarity with any expert path >= 0.6
                    pm = False
                    path_set = set(path_idxs)
                    for exp_p in entry['paths']:
                        exp_set = set(exp_p)
                        intersection = len(path_set.intersection(exp_set))
                        union = len(path_set.union(exp_set))
                        if union > 0 and (intersection / union) >= 0.6:
                            pm = True
                            break
                    if pm:
                        match_status = "Path match"
                        cnt_path_match += 1
                        cnt_root_match += 1
                    elif rm:
                        match_status = "Root match only"
                        cnt_root_match += 1
                    else:
                        match_status = "No match"
                else:
                    match_status = "New chain"
                    cnt_new += 1
                results.append({
                    "Window ID": win_idx,
                    "Trigger Variable": t_name,
                    "Traced Path": "->".join(path_names),
                    "Root-Cause Variable": root,
                    "Root-Cause Anomaly Score": f"{root_score:.3f}",
                    "In Knowledge Base": "Yes" if t_idx in self.expert_knowledge else "No",
                    "Match Status": match_status,
                    "Mechanism Description": logic
                })
        denom = max(cnt_kb_covered, 1)
        summary = [
            {"Metric": "Total anomaly samples detected", "Value": cnt_detected},
            {"Metric": "Samples covered by knowledge base", "Value": cnt_kb_covered},
            {"Metric": "Anomaly-chain accuracy", "Value": f"{cnt_path_match / denom:.2%}"},
            {"Metric": "Root-cause accuracy", "Value": f"{cnt_root_match / denom:.2%}"},
            {"Metric": "Newly discovered anomaly patterns", "Value": cnt_new}
        ]
        save_path = f"{config.RESULT_SAVE_DIR}/{config.TEST_RESULT_FILENAME}"
        with pd.ExcelWriter(save_path, engine='openpyxl') as writer:
            pd.DataFrame(summary).to_excel(writer, sheet_name='Sheet1_Summary', index=False)
            pd.DataFrame(results).to_excel(writer, sheet_name='Sheet2_TestDetails', index=False)
        print("\n" + "=" * 50)
        print(pd.DataFrame(summary).to_string(index=False))
        print(f"\nReport saved to: {save_path}")
        print("=" * 50)

    def save_model(self):
        path = config.MODEL_FILE_PATH
        torch.save(self.model.state_dict(), path)
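

if __name__ == "__main__":
    # End-to-end usage sketch (hedged): `causal_graph`, `train_scores`,
    # `test_scores`, and `threshold_df` are hypothetical placeholders for the
    # outputs of the upstream pipeline; this file does not build them itself.
    #
    # trainer = RLTrainer(causal_graph, train_scores, threshold_df)
    # trainer.pretrain_bc()          # Stage 1: behavior cloning on expert chains
    # trainer.train_ppo()            # Stage 2: PPO self-exploration
    # trainer.evaluate(test_scores)  # Step 4: test-set evaluation + Excel report
    # trainer.save_model()
    pass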