# gat_lstm.py
import torch
import torch.nn as nn
  4. # 单个独立模型(对应1个因变量)
  5. class SingleGATLSTM(nn.Module):
  6. def __init__(self, args):
  7. """
  8. 初始化单个预测子模型
  9. :param args: 包含模型参数的配置对象(如输入特征数、隐藏层大小等)
  10. """
  11. super(SingleGATLSTM, self).__init__()
  12. self.args = args
  13. # 独立的LSTM层
  14. self.lstm = nn.LSTM(
  15. input_size=args.feature_num,
  16. hidden_size=args.hidden_size,
  17. num_layers=args.num_layers,
  18. batch_first=True
  19. )
  20. # 独立的输出层:将LSTM输出映射到预测结果(输出长度为output_size)
  21. self.final_linear = nn.Sequential(
  22. nn.Linear(args.hidden_size, args.hidden_size),
  23. nn.LeakyReLU(0.01),
  24. nn.Dropout(args.dropout * 0.4),
  25. nn.Linear(args.hidden_size, args.output_size)
  26. )
  27. self._init_weights()
  28. def _init_weights(self):
  29. """初始化模型权重,提升训练稳定性和收敛速度"""
  30. # 初始化线性层权重
  31. for m in self.modules():
  32. if isinstance(m, nn.Linear):
  33. nn.init.xavier_uniform_(m.weight)
  34. if m.bias is not None:
  35. nn.init.zeros_(m.bias)
  36. elif isinstance(m, nn.BatchNorm1d):
  37. nn.init.constant_(m.weight, 1)
  38. nn.init.constant_(m.bias, 0)
  39. # 初始化LSTM权重
  40. for name, param in self.lstm.named_parameters():
  41. if 'weight_ih' in name:
  42. nn.init.xavier_uniform_(param.data)
  43. elif 'weight_hh' in name:
  44. nn.init.orthogonal_(param.data)
  45. elif 'bias' in name:
  46. param.data.fill_(0)
  47. n = param.size(0)
  48. start, end = n // 4, n // 2
  49. param.data[start:end].fill_(1)
  50. def forward(self, x):
  51. """
  52. 前向传播:输入序列 -> LSTM处理 -> 输出层预测
  53. :param x: 输入张量,形状为[batch_size, seq_len, feature_num]
  54. :return: 预测结果,形状为[batch_size, output_size]
  55. """
  56. batch_size, seq_len, feature_num = x.size()
  57. lstm_out, _ = self.lstm(x)
  58. # 取最后一个时间步的输出
  59. last_out = lstm_out[:, -1, :]
  60. # 输出层预测
  61. output = self.final_linear(last_out)
  62. return output # [batch_size, output_size]
  63. # 16个独立模型的容器(总模型)
  64. class GAT_LSTM(nn.Module):
  65. def __init__(self, args):
  66. """
  67. 初始化多输出模型容器
  68. :param args: 配置参数,labels_num指定目标特征数量(即子模型数量)
  69. """
  70. super(GAT_LSTM, self).__init__()
  71. self.args = args
  72. # 创建16个独立模型(数量由labels_num指定)
  73. self.models = nn.ModuleList([SingleGATLSTM(args) for _ in range(args.labels_num)])
  74. def forward(self, x):
  75. """
  76. 前向传播:所有子模型并行处理输入,拼接结果
  77. :param x: 输入张量,形状为[batch_size, seq_len, feature_num]
  78. :return: 拼接后的预测结果,形状为[batch_size, output_size * labels_num]
  79. """
  80. outputs = []
  81. for model in self.models:
  82. outputs.append(model(x)) # 每个输出为[batch, output_size]
  83. return torch.cat(outputs, dim=1) # 拼接后[batch, output_size * labels_num]