# data_trainer.py
import copy

import torch
import joblib
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from datetime import datetime, timedelta
from sklearn.preprocessing import MinMaxScaler
class Trainer:
    """Wraps model training, validation, early stopping, and test-set evaluation."""

    def __init__(self, model, args, data):
        self.args = args
        self.model = model
        self.data = data
        # Early-stopping state
        self.patience = args.patience
        self.min_delta = args.min_delta
        self.counter = 0
        self.early_stop = False
        self.best_val_loss = float('inf')
        self.best_model_state = None
        self.best_epoch = 0
    def train_full_model(self, train_loader, val_loader, optimizer, criterion, scheduler):
        # Reset early-stopping state so the trainer can be reused across runs
        self.counter = 0
        self.best_val_loss = float('inf')
        self.early_stop = False
        self.best_model_state = None
        self.best_epoch = 0
        max_epochs = self.args.epochs
        for epoch in range(max_epochs):
            self.model.train()
            running_loss = 0.0

            for inputs, targets in train_loader:
                inputs = inputs.to(self.args.device)
                targets = targets.to(self.args.device)
                optimizer.zero_grad()
                outputs = self.model(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

            train_loss = running_loss / len(train_loader)
            val_loss = self.validate_full(val_loader, criterion) if val_loader else 0.0
            print(f'Epoch {epoch+1}/{max_epochs}, Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}')
            if val_loader:
                if val_loss < (self.best_val_loss - self.min_delta):
                    self.best_val_loss = val_loss
                    self.counter = 0
                    # Deep-copy the weights: state_dict() returns references to the
                    # live tensors, which would otherwise keep changing as training continues
                    self.best_model_state = copy.deepcopy(self.model.state_dict())
                    self.best_epoch = epoch
                else:
                    self.counter += 1
                    if self.counter >= self.patience:
                        self.early_stop = True
                        print("Early stopping triggered")

            scheduler.step()
            torch.cuda.empty_cache()
            if self.early_stop:
                break
        if self.best_model_state is not None:
            self.model.load_state_dict(self.best_model_state)
            print(f"Best epoch: {self.best_epoch+1}, best validation loss: {self.best_val_loss:.6f}")
        return self.model
    def validate_full(self, val_loader, criterion):
        self.model.eval()
        total_loss = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs = inputs.to(self.args.device)
                targets = targets.to(self.args.device)
                outputs = self.model(inputs)
                loss = criterion(outputs, targets)
                total_loss += loss.item()
        return total_loss / len(val_loader)
    def save_model(self):
        torch.save(self.model.state_dict(), self.args.model_path)
        print(f"Model saved to: {self.args.model_path}")
    def evaluate_model(self, test_loader, criterion):
        self.model.eval()
        scaler = joblib.load(self.args.scaler_path)
        predictions = []
        true_values = []

        with torch.no_grad():
            for inputs, targets in test_loader:
                inputs = inputs.to(self.args.device)
                targets = targets.to(self.args.device)
                outputs = self.model(inputs)
                predictions.append(outputs.cpu().numpy())
                true_values.append(targets.cpu().numpy())

        predictions = np.concatenate(predictions, axis=0)
        true_values = np.concatenate(true_values, axis=0)

        # Reshape from (batch, output_size * labels_num) to (batch * output_size, labels_num)
        predictions = predictions.reshape(
            predictions.shape[0], self.args.output_size, self.args.labels_num
        ).reshape(-1, self.args.labels_num)
        true_values = true_values.reshape(
            true_values.shape[0], self.args.output_size, self.args.labels_num
        ).reshape(-1, self.args.labels_num)

        # Inverse-normalize the label columns only: build a partial scaler that
        # reuses the fitted min_/scale_ of the last labels_num columns
        column_scaler = MinMaxScaler(feature_range=(0, 1))
        column_scaler.min_ = scaler.min_[-self.args.labels_num:]
        column_scaler.scale_ = scaler.scale_[-self.args.labels_num:]

        true_values = column_scaler.inverse_transform(true_values)
        predictions = column_scaler.inverse_transform(predictions)

        # The 4 core variables
        column_names = [
            "AR.UF1_SSD_KMYC",      # UF1 transmembrane pressure difference
            "AR.RO1_2D_YC",         # RO1 stage-2 pressure difference
            "AR.PUBLIC_BY_REAL_1",  # RO1 stage-3 pressure difference
            "1#RO_CSFLOW",          # #1 RO product water flow
        ]

        # Generate a timestamp for each predicted point
        start_datetime = datetime.strptime(self.args.test_start_date, "%Y-%m-%d")
        time_interval = timedelta(minutes=(4 * self.args.resolution / 60))
        total_points = len(predictions)
        date_times = [start_datetime + i * time_interval for i in range(total_points)]

        results = pd.DataFrame({'date': date_times})
        metrics_details = []

        for i, col_name in enumerate(column_names):
            if i >= self.args.labels_num:
                break  # guard against indexing past the label columns

            results[f'{col_name}_True'] = true_values[:, i]
            results[f'{col_name}_Predicted'] = predictions[:, i]

            var_true = true_values[:, i]
            var_pred = predictions[:, i]

            # Compute metrics on non-zero ground truth so MAPE is well defined
            non_zero_mask = var_true != 0
            var_true_nonzero = var_true[non_zero_mask]
            var_pred_nonzero = var_pred[non_zero_mask]

            if len(var_true_nonzero) > 0:
                r2 = r2_score(var_true_nonzero, var_pred_nonzero)
                rmse = np.sqrt(np.mean((var_true_nonzero - var_pred_nonzero) ** 2))
                mape = np.mean(np.abs((var_true_nonzero - var_pred_nonzero) / np.abs(var_true_nonzero))) * 100
                metrics_details.append(f"{col_name}: R2={r2:.4f}, RMSE={rmse:.4f}, MAPE={mape:.4f}%")
            else:
                metrics_details.append(f"{col_name}: no valid data")
        results.to_csv(self.args.output_csv_path, index=False)

        txt_path = self.args.output_csv_path.replace('.csv', '_metrics.txt')
        with open(txt_path, 'w') as f:
            f.write('\n'.join(metrics_details))

        return metrics_details
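

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pipeline): wires Trainer up
# to a toy linear model and random tensors so the train/validate loop can be
# smoke-tested end to end. Every hyperparameter value below is an illustrative
# assumption, not a value taken from the real water-treatment configuration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace
    from torch import nn, optim
    from torch.utils.data import DataLoader, TensorDataset

    # Hypothetical args namespace covering only the fields the loop touches
    args = Namespace(device="cpu", epochs=5, patience=3, min_delta=1e-4,
                     model_path="model.pt")
    model = nn.Linear(8, 4).to(args.device)

    # Random data: 64 samples, 8 features in, 4 targets out
    X, y = torch.randn(64, 8), torch.randn(64, 4)
    train_loader = DataLoader(TensorDataset(X, y), batch_size=16)
    val_loader = DataLoader(TensorDataset(X, y), batch_size=16)

    trainer = Trainer(model, args, data=None)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10)
    trainer.train_full_model(train_loader, val_loader, optimizer,
                             nn.MSELoss(), scheduler)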