feat: upgrade the deep learning model to a Temporal Fusion Transformer architecture

- Refactor LSTMMLPRegressor into TemporalFusionRegressor, replacing the LSTM with a Transformer encoder
- Add LearnedAttentionPooling and GatedResidualBlock modules to increase model expressiveness
- Improve the training strategy with a OneCycleLR scheduler and per-sample loss weighting (see the illustration below)
- Refine the absence-event sampling algorithm so absence durations are derived more precisely from stress, health, family and related factors
- Update .gitignore to exclude raw data files and remove the raw CSV files that are no longer used
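
As a quick illustration of the per-sample weighting referenced above (the formula mirrors _compute_sample_weights in the diff below; the example targets are made up), samples are weighted linearly with the absence length, capped at twice the 12-day reference point:

    import torch

    def compute_sample_weights(targets: torch.Tensor, target_transform: str) -> torch.Tensor:
        # Same formula as _compute_sample_weights below: undo log1p if needed,
        # then scale linearly with absence days, clamped at twice the 12-day reference.
        base = torch.expm1(targets) if target_transform == 'log1p' else targets
        normalized = torch.clamp(base / 12.0, min=0.0, max=2.0)
        return 1.0 + normalized * 0.8

    days = torch.tensor([0.0, 6.0, 12.0, 36.0])
    print(compute_sample_weights(torch.log1p(days), 'log1p'))
    # tensor([1.0000, 1.4000, 1.8000, 2.6000]) -- long absences weigh up to 2.6x in SmoothL1Loss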
2026-03-20 16:30:08 +08:00
parent ff0fbf96f7
commit 77e38fd15b
6 changed files with 225 additions and 12835 deletions


@@ -55,8 +55,11 @@ STATIC_FEATURES = [
     '岗位稳定性指数',
 ]

 DEFAULT_EPOCHS = 80
-DEFAULT_BATCH_SIZE = 256
-EARLY_STOPPING_PATIENCE = 12
+DEFAULT_BATCH_SIZE = 128
+EARLY_STOPPING_PATIENCE = 16
+TRANSFORMER_D_MODEL = 160
+TRANSFORMER_HEADS = 5
+TRANSFORMER_LAYERS = 3

 BaseTorchModule = nn.Module if nn is not None else object
@@ -90,7 +93,46 @@ class SequenceStaticDataset(Dataset):
         )


-class LSTMMLPRegressor(BaseTorchModule):
+class LearnedAttentionPooling(BaseTorchModule):
+    def __init__(self, hidden_dim: int):
+        super().__init__()
+        self.score = nn.Sequential(
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.Tanh(),
+            nn.Linear(hidden_dim, 1),
+        )
+
+    def forward(self, sequence_x: torch.Tensor) -> torch.Tensor:
+        attn_scores = self.score(sequence_x).squeeze(-1)
+        attn_weights = torch.softmax(attn_scores, dim=1)
+        return torch.sum(sequence_x * attn_weights.unsqueeze(-1), dim=1)
+
+
+class GatedResidualBlock(BaseTorchModule):
+    def __init__(self, input_dim: int, hidden_dim: int, dropout: float = 0.15):
+        super().__init__()
+        self.proj = nn.Linear(input_dim, hidden_dim) if input_dim != hidden_dim else nn.Identity()
+        self.net = nn.Sequential(
+            nn.Linear(input_dim, hidden_dim),
+            nn.LayerNorm(hidden_dim),
+            nn.GELU(),
+            nn.Dropout(dropout),
+            nn.Linear(hidden_dim, hidden_dim),
+        )
+        self.gate = nn.Sequential(
+            nn.Linear(hidden_dim * 2, hidden_dim),
+            nn.Sigmoid(),
+        )
+        self.out_norm = nn.LayerNorm(hidden_dim)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        residual = self.proj(x)
+        transformed = self.net(x)
+        gate = self.gate(torch.cat([residual, transformed], dim=-1))
+        return self.out_norm(residual + transformed * gate)
+
+
+class TemporalFusionRegressor(BaseTorchModule):
     def __init__(
         self,
         seq_num_dim: int,
@@ -110,43 +152,57 @@ class LSTMMLPRegressor(BaseTorchModule):
         static_cat_dim = sum(embedding.embedding_dim for embedding in self.static_cat_embeddings)
         seq_input_dim = seq_num_dim + seq_cat_dim
         static_input_dim = static_num_dim + static_cat_dim
+        self.position_embedding = nn.Parameter(torch.randn(WINDOW_SIZE, TRANSFORMER_D_MODEL) * 0.02)
         self.seq_projection = nn.Sequential(
-            nn.Linear(seq_input_dim, 128),
-            nn.LayerNorm(128),
+            nn.Linear(seq_input_dim, TRANSFORMER_D_MODEL),
+            nn.LayerNorm(TRANSFORMER_D_MODEL),
             nn.GELU(),
-            nn.Dropout(0.15),
+            nn.Dropout(0.12),
         )
-        self.lstm = nn.LSTM(
-            input_size=128,
-            hidden_size=96,
-            num_layers=2,
+        encoder_layer = nn.TransformerEncoderLayer(
+            d_model=TRANSFORMER_D_MODEL,
+            nhead=TRANSFORMER_HEADS,
+            dim_feedforward=TRANSFORMER_D_MODEL * 3,
+            dropout=0.15,
+            activation='gelu',
             batch_first=True,
-            dropout=0.2,
-            bidirectional=True,
+            norm_first=True,
         )
+        self.sequence_encoder = nn.TransformerEncoder(
+            encoder_layer,
+            num_layers=TRANSFORMER_LAYERS,
+        )
+        self.sequence_pool = LearnedAttentionPooling(TRANSFORMER_D_MODEL)
         self.sequence_head = nn.Sequential(
-            nn.Linear(96 * 2 * 2, 128),
+            nn.Linear(TRANSFORMER_D_MODEL * 3, 192),
+            nn.LayerNorm(192),
             nn.GELU(),
+            nn.Dropout(0.18),
+            nn.Linear(192, 128),
+            nn.GELU(),
             nn.Dropout(0.2),
         )
         self.static_net = nn.Sequential(
-            nn.Linear(static_input_dim, 96),
-            nn.LayerNorm(96),
-            nn.GELU(),
-            nn.Dropout(0.15),
-            nn.Linear(96, 64),
-            nn.GELU(),
-            nn.Dropout(0.1),
+            GatedResidualBlock(static_input_dim, 128, dropout=0.15),
+            GatedResidualBlock(128, 96, dropout=0.12),
         )
+        self.context_gate = nn.Sequential(
+            nn.Linear(128 + 96, 128 + 96),
+            nn.Sigmoid(),
+        )
         self.fusion = nn.Sequential(
-            nn.Linear(128 + 64, 128),
-            nn.LayerNorm(128),
+            GatedResidualBlock(128 + 96, 160, dropout=0.18),
+            nn.Dropout(0.12),
+            nn.Linear(160, 96),
             nn.GELU(),
-            nn.Dropout(0.2),
-            nn.Linear(128, 64),
+            nn.Dropout(0.08),
+            nn.Linear(96, 1),
+        )
+        self.shortcut_head = nn.Sequential(
+            nn.Linear(seq_num_dim + static_num_dim, 64),
+            nn.LayerNorm(64),
             nn.GELU(),
-            nn.Dropout(0.1),
+            nn.Dropout(0.08),
             nn.Linear(64, 1),
         )
@@ -163,11 +219,12 @@ class LSTMMLPRegressor(BaseTorchModule):
             seq_parts.append(seq_embedded)
         seq_input = torch.cat(seq_parts, dim=-1)
         seq_input = self.seq_projection(seq_input)
-        lstm_output, _ = self.lstm(seq_input)
-        sequence_last = lstm_output[:, -1, :]
-        sequence_mean = lstm_output.mean(dim=1)
-        sequence_repr = self.sequence_head(torch.cat([sequence_last, sequence_mean], dim=1))
+        seq_input = seq_input + self.position_embedding.unsqueeze(0)
+        sequence_context = self.sequence_encoder(seq_input)
+        sequence_last = sequence_context[:, -1, :]
+        sequence_mean = sequence_context.mean(dim=1)
+        sequence_attended = self.sequence_pool(sequence_context)
+        sequence_repr = self.sequence_head(torch.cat([sequence_last, sequence_mean, sequence_attended], dim=1))
         static_parts = [static_num_x]
         static_embedded = self._embed_categorical(static_cat_x, self.static_cat_embeddings)
@@ -177,7 +234,13 @@ class LSTMMLPRegressor(BaseTorchModule):
         static_repr = self.static_net(static_input)
         fused = torch.cat([sequence_repr, static_repr], dim=1)
-        return self.fusion(fused).squeeze(1)
+        fused = fused * self.context_gate(fused)
+        shortcut = self.shortcut_head(torch.cat([seq_num_x[:, -1, :], static_num_x], dim=1))
+        return (self.fusion(fused) + shortcut).squeeze(1)
+
+
+class LSTMMLPRegressor(TemporalFusionRegressor):
+    pass


 def is_available() -> bool:
@@ -413,6 +476,15 @@ def _evaluate_model(
     return metrics['rmse'], metrics


+def _compute_sample_weights(targets: torch.Tensor, target_transform: str) -> torch.Tensor:
+    if target_transform == 'log1p':
+        base_targets = torch.expm1(targets)
+    else:
+        base_targets = targets
+    normalized = torch.clamp(base_targets / 12.0, min=0.0, max=2.0)
+    return 1.0 + normalized * 0.8
+
+
 def train_lstm_mlp(
     train_df: pd.DataFrame,
     test_df: pd.DataFrame,
@@ -455,29 +527,35 @@ def train_lstm_mlp(
     else:
         print('[lstm_mlp] Training device: CPU')

-    model = LSTMMLPRegressor(
+    model = TemporalFusionRegressor(
         seq_num_dim=train_seq_num.shape[-1],
         static_num_dim=train_static_num.shape[-1],
         seq_cat_cardinalities=[len(category_maps[feature]) + 1 for feature in feature_layout['seq_cat_features']],
         static_cat_cardinalities=[len(category_maps[feature]) + 1 for feature in feature_layout['static_cat_features']],
     ).to(device)
-    optimizer = torch.optim.AdamW(model.parameters(), lr=0.0012, weight_decay=1e-4)
-    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
-        optimizer, mode='min', factor=0.6, patience=4, min_lr=1e-5
-    )
-    criterion = nn.SmoothL1Loss(beta=0.35)
+    optimizer = torch.optim.AdamW(model.parameters(), lr=9e-4, weight_decay=3e-4)
+    criterion = nn.SmoothL1Loss(beta=0.28, reduction='none')

     train_loader = DataLoader(
         SequenceStaticDataset(train_seq_num, train_seq_cat, train_static_num, train_static_cat, y_train),
         batch_size=batch_size,
         shuffle=True,
         drop_last=False,
     )
     val_loader = DataLoader(
         SequenceStaticDataset(val_seq_num, val_seq_cat, val_static_num, val_static_cat, y_val),
         batch_size=batch_size,
         shuffle=False,
     )
+    total_steps = max(20, epochs * max(1, len(train_loader)))
+    scheduler = torch.optim.lr_scheduler.OneCycleLR(
+        optimizer,
+        max_lr=0.0014,
+        total_steps=total_steps,
+        pct_start=0.12,
+        div_factor=12.0,
+        final_div_factor=40.0,
+    )

     best_state = None
     best_metrics = None
@@ -496,15 +574,17 @@ def train_lstm_mlp(
             optimizer.zero_grad(set_to_none=True)
             predictions = model(batch_seq_num, batch_seq_cat, batch_static_num, batch_static_cat)
+            sample_weights = _compute_sample_weights(batch_target, target_transform)
             loss = criterion(predictions, batch_target)
+            loss = (loss * sample_weights).mean()
             loss.backward()
             torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
             optimizer.step()
+            scheduler.step()

             running_loss += float(loss.item()) * len(batch_target)

         train_loss = running_loss / max(1, len(train_loader.dataset))
         val_rmse, val_metrics = _evaluate_model(model, val_loader, device, target_transform)
-        scheduler.step(val_rmse)

         improved = val_rmse + 1e-4 < best_val_rmse
         if improved:
@@ -554,6 +634,7 @@ def train_lstm_mlp(
     bundle = {
         'state_dict': model.state_dict(),
+        'architecture': 'temporal_fusion_transformer',
         'window_size': WINDOW_SIZE,
         'target_transform': target_transform,
         'feature_layout': feature_layout,
@@ -583,6 +664,7 @@ def train_lstm_mlp(
             'sequence_window_size': WINDOW_SIZE,
             'sequence_feature_names': SEQUENCE_FEATURES,
             'static_feature_names': STATIC_FEATURES,
+            'deep_learning_architecture': 'temporal_fusion_transformer',
             'deep_validation_r2': round(float(best_metrics['r2']), 4) if best_metrics else None,
         },
     }
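
For reviewers who want to sanity-check the new encoder path in isolation (and see why the first sequence_head layer takes TRANSFORMER_D_MODEL * 3 inputs), here is a minimal standalone sketch using the hyperparameters from this commit. LearnedAttentionPooling is copied from the diff, nn.Module stands in for BaseTorchModule, and the batch size of 4 plus WINDOW_SIZE = 30 are arbitrary placeholders, since WINDOW_SIZE is defined elsewhere in the module:

import torch
import torch.nn as nn

TRANSFORMER_D_MODEL = 160
TRANSFORMER_HEADS = 5
TRANSFORMER_LAYERS = 3
WINDOW_SIZE = 30  # placeholder; the real constant lives elsewhere in the module


class LearnedAttentionPooling(nn.Module):
    # Copied from the diff: softmax-weighted sum over the time axis.
    def __init__(self, hidden_dim: int):
        super().__init__()
        self.score = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, sequence_x: torch.Tensor) -> torch.Tensor:
        attn_scores = self.score(sequence_x).squeeze(-1)
        attn_weights = torch.softmax(attn_scores, dim=1)
        return torch.sum(sequence_x * attn_weights.unsqueeze(-1), dim=1)


encoder_layer = nn.TransformerEncoderLayer(
    d_model=TRANSFORMER_D_MODEL,
    nhead=TRANSFORMER_HEADS,
    dim_feedforward=TRANSFORMER_D_MODEL * 3,
    dropout=0.15,
    activation='gelu',
    batch_first=True,
    norm_first=True,
)
encoder = nn.TransformerEncoder(encoder_layer, num_layers=TRANSFORMER_LAYERS)
pool = LearnedAttentionPooling(TRANSFORMER_D_MODEL)
position_embedding = torch.randn(WINDOW_SIZE, TRANSFORMER_D_MODEL) * 0.02

x = torch.randn(4, WINDOW_SIZE, TRANSFORMER_D_MODEL)       # projected sequence features
context = encoder(x + position_embedding.unsqueeze(0))      # (4, WINDOW_SIZE, 160)
summary = torch.cat([context[:, -1, :], context.mean(dim=1), pool(context)], dim=1)
print(summary.shape)  # torch.Size([4, 480]) -> matches nn.Linear(TRANSFORMER_D_MODEL * 3, 192)

Concatenating the last time step, the mean over time and the attention-pooled summary is what produces the 480-wide input that sequence_head expects.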