# data_preprocessor.py
import os
from concurrent.futures import ThreadPoolExecutor

import joblib
import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm


class DataPreprocessor:
    """Data preprocessing class."""

    # Columns that must be kept
    COLUMNS_TO_KEEP = [
        'index',
        "AR.1#UF_JSFLOW_O", "AR.2#UF_JSFLOW_O", "AR.1#RO_JSFLOW_O", "AR.2#RO_JSFLOW_O",
        "AR.1#UF_JSPRESS_O", "AR.2#UF_JSPRESS_O", "AR.1#RO_JSPRESS_O", "AR.2#RO_JSPRESS_O",
        "AR.1#RO_EDJSPRESS_O", "AR.1#RO_SDJSPRESS_O", "AR.2#RO_EDJSPRESS_O", "AR.2#RO_SDJSPRESS_O",
        "AR.ZJS_TEMP_O", "AR.ZJS_ZD_O", "AR.RO_JSDD_O", "AR.RO_JSORP_O", "AR.RO_JSPH_O",
        "AR.1#UF_V_FB_O", "AR.2#UF_V_FB_O", "AR.1#UFBWB_FRE_FB_O", "AR.2#UFBWB_FRE_FB_O",
        "AR.1#RODJB_FRE_FB_O", "AR.1#ROGYB_FRE_FB_O", "AR.1#RODJB_CZ_O", "AR.1#ROGYB_CZ_O",
        "AR.2#RODJB_CZ_O", "AR.2#ROGYB_CZ_O", "AR.ROGSB_FRE_FB_O", "AR.UFGSB_FRE_FB_O",
        "AR.V_UF1_TJV_KD_FB", "AR.V_UF2_TJV_KD_FB", "AR.CS_LEVEL_O", "AR.UF_CSLEVEL_O",
        "AR.UF1_SSD_KMYC", "AR.UF2_SSD_KMYC", "AR.RO1_2D_YC", "AR.PUBLIC_BY_REAL_1",
        "1#RO_CSFLOW"
    ]

    @staticmethod
    def load_and_process_data(args, data):
        """Load the data and split it into train/validation/test DataLoaders."""
        # Parse timestamps
        data['date'] = pd.to_datetime(data['date'])
        time_interval = pd.Timedelta(minutes=(4 * args.resolution / 60))
        window_time_span = time_interval * (args.seq_len + 1)
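        # Worked example of the time arithmetic above (assuming, as the factor
        # of 4 suggests, a raw sampling period of 4 seconds): with resolution=15
        # each retained row is 4 * 15 / 60 = 1 minute apart, so seq_len=60 gives
        # a look-back window spanning 61 minutes. These figures are illustrative,
        # not the project's actual configuration.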
        val_start_date = pd.to_datetime(args.val_start_date)
        test_start_date = pd.to_datetime(args.test_start_date)
        # Pull the val/test window starts back by one full window so their
        # first samples have a complete input history
        adjusted_val_start = val_start_date - window_time_span
        adjusted_test_start = test_start_date - window_time_span
        train_mask = (data['date'] >= pd.to_datetime(args.train_start_date)) & \
                     (data['date'] <= pd.to_datetime(args.train_end_date))
        val_mask = (data['date'] >= adjusted_val_start) & \
                   (data['date'] <= pd.to_datetime(args.val_end_date))
        test_mask = (data['date'] >= adjusted_test_start) & \
                    (data['date'] <= pd.to_datetime(args.test_end_date))
        train_data = data[train_mask].reset_index(drop=True)
        val_data = data[val_mask].reset_index(drop=True)
        test_data = data[test_mask].reset_index(drop=True)
        train_data = train_data.drop(columns=['date'])
        val_data = val_data.drop(columns=['date'])
        test_data = test_data.drop(columns=['date'])
        # Build supervised datasets
        train_supervised = DataPreprocessor.create_supervised_dataset(args, train_data, 1)
        val_supervised = DataPreprocessor.create_supervised_dataset(args, val_data, 1)
        test_supervised = DataPreprocessor.create_supervised_dataset(args, test_data, args.step_size)
        # Wrap them in DataLoaders
        train_loader = DataPreprocessor.load_data(args, train_supervised, shuffle=True)
        val_loader = DataPreprocessor.load_data(args, val_supervised, shuffle=False)
        test_loader = DataPreprocessor.load_data(args, test_supervised, shuffle=False)
        return train_loader, val_loader, test_loader, data

    @staticmethod
    def read_and_combine_csv_files(args):
        """Read the CSV files and apply feature selection and preprocessing."""
        current_dir = os.path.dirname(__file__)
        parent_dir = os.path.dirname(current_dir)
        args.data_dir = os.path.join(parent_dir, args.data_dir)

        def read_file(file_count):
            file_name = args.file_pattern.format(file_count)
            file_path = os.path.join(args.data_dir, file_name)
            try:
                df = pd.read_csv(file_path)
                # Keep only the required columns; a missing column raises a KeyError
                return df[DataPreprocessor.COLUMNS_TO_KEEP]
            except KeyError as e:
                print(f"File {file_name} is missing columns: {e}")
                raise

        file_indices = list(range(args.start_files, args.end_files + 1))
        max_workers = os.cpu_count()
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            results = list(tqdm(executor.map(read_file, file_indices),
                                total=len(file_indices),
                                desc="Reading files"))
        all_data = pd.concat(results, ignore_index=True)
        # Enforce a consistent column order
        all_data = all_data[DataPreprocessor.COLUMNS_TO_KEEP]
        # Downsample
        chunk = all_data.iloc[::args.resolution, :].reset_index(drop=True)
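        # e.g. resolution=15 keeps every 15th row, turning the assumed 4-second
        # raw cadence into one row per minute (illustrative value)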
        # Engineer features
        chunk = DataPreprocessor.process_date(chunk, args)
        chunk = DataPreprocessor.scaler_data(chunk, args)
        return chunk

    @staticmethod
    def process_date(data, args):
        data = data.rename(columns={'index': 'date'})
        data['date'] = pd.to_datetime(data['date'])
        time_features = []
        # Always generate minute-level and day-level features, consistent with the Predictor
        data['minute_of_day'] = data['date'].dt.hour * 60 + data['date'].dt.minute
        data['minute_sin'] = np.sin(2 * np.pi * data['minute_of_day'] / 1440)
        data['minute_cos'] = np.cos(2 * np.pi * data['minute_of_day'] / 1440)
        data['day_of_year'] = data['date'].dt.dayofyear
        data['day_year_sin'] = np.sin(2 * np.pi * data['day_of_year'] / 366)
        data['day_year_cos'] = np.cos(2 * np.pi * data['day_of_year'] / 366)
        time_features.extend(['minute_sin', 'minute_cos', 'day_year_sin', 'day_year_cos'])
        data.drop(columns=['minute_of_day', 'day_of_year'], inplace=True)
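        # The sin/cos pair encodes each period as a point on the unit circle,
        # so 23:59 and 00:00 come out adjacent; e.g. minute 0 -> (sin 0, cos 1),
        # minute 360 (06:00) -> (sin 1, cos 0), minute 720 (noon) -> (sin 0, cos -1)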
        other_columns = [col for col in data.columns if col not in ['date'] and col not in time_features]
        data = data[['date'] + time_features + other_columns]
        return data

    @staticmethod
    def scaler_data(data, args):
        date_col = data[['date']]
        data_to_scale = data.drop(columns=['date'])
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = scaler.fit_transform(data_to_scale)
        joblib.dump(scaler, args.scaler_path)
        scaled_data = pd.DataFrame(scaled_data, columns=data_to_scale.columns)
        scaled_data = pd.concat([date_col.reset_index(drop=True), scaled_data], axis=1)
        return scaled_data
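
    # Note: the scaler above is fit on the full series (before the train/val/test
    # split) and persisted with joblib. At inference time it can be reloaded to
    # map predictions back to physical units (a sketch, assuming the prediction
    # array covers all scaled columns):
    #     scaler = joblib.load(args.scaler_path)
    #     unscaled = scaler.inverse_transform(scaled_predictions)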

    @staticmethod
    def create_supervised_dataset(args, data, step_size):
        data = pd.DataFrame(data)
        cols = []
        col_names = []
        feature_columns = data.columns.tolist()
        # Input sequence, ordered time step first so the flat column layout
        # matches the later reshape to (batch, seq_len, n_features) in load_data
        # (iterating features first would scramble that reshape)
        for i in range(args.seq_len - 1, -1, -1):
            for col in feature_columns:
                cols.append(data[[col]].shift(i))
                col_names.append(f"{col}(t-{i})")
        # Target sequence (the last labels_num columns)
        target_columns = feature_columns[-args.labels_num:]
        for i in range(1, args.output_size + 1):
            for col in target_columns:
                cols.append(data[[col]].shift(-i))
                col_names.append(f"{col}(t+{i})")
        dataset = pd.concat(cols, axis=1)
        dataset.columns = col_names
        dataset = dataset.iloc[::step_size, :]
        dataset = dataset.dropna()
        return dataset
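
    # Column layout example (illustrative: seq_len=3, output_size=2, labels_num=1,
    # features ['a', 'b'] with 'b' as the target):
    #     a(t-2), b(t-2), a(t-1), b(t-1), a(t-0), b(t-0), b(t+1), b(t+2)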

    @staticmethod
    def load_data(args, dataset, shuffle):
        input_length = args.seq_len
        n_features = args.feature_num
        labels_num = args.labels_num
        n_features_total = n_features * input_length
        n_labels_total = args.output_size * labels_num
        X = dataset.values[:, :n_features_total]
        y = dataset.values[:, n_features_total:n_features_total + n_labels_total]
        X = X.reshape(X.shape[0], input_length, n_features)
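        # Shape check (illustrative: seq_len=30, feature_num=42): the first
        # 30 * 42 = 1260 columns reshape to (N, 30, 42)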
        X = torch.tensor(X, dtype=torch.float32).to(args.device)
        y = torch.tensor(y, dtype=torch.float32).to(args.device)
        dataset_tensor = TensorDataset(X, y)
        generator = torch.Generator()
        generator.manual_seed(args.random_seed)
        return DataLoader(dataset_tensor, batch_size=args.batch_size, shuffle=shuffle, generator=generator)
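

if __name__ == "__main__":
    # Minimal usage sketch. The attribute names mirror the `args` fields
    # referenced above; every value here is an illustrative assumption, not
    # the project's actual configuration.
    from argparse import Namespace

    args = Namespace(
        data_dir="data", file_pattern="data_{}.csv", start_files=1, end_files=3,
        resolution=15, scaler_path="scaler.pkl",
        train_start_date="2023-01-01", train_end_date="2023-06-30",
        val_start_date="2023-07-01", val_end_date="2023-08-31",
        test_start_date="2023-09-01", test_end_date="2023-10-31",
        seq_len=30, output_size=5, labels_num=1,
        # 38 sensor columns ('index' becomes 'date' and is dropped) + 4 time features
        feature_num=len(DataPreprocessor.COLUMNS_TO_KEEP) - 1 + 4,
        step_size=5, batch_size=64, random_seed=42,
        device="cuda" if torch.cuda.is_available() else "cpu",
    )
    combined = DataPreprocessor.read_and_combine_csv_files(args)
    train_loader, val_loader, test_loader, _ = DataPreprocessor.load_and_process_data(args, combined)
    print(f"batches - train: {len(train_loader)}, val: {len(val_loader)}, test: {len(test_loader)}")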