feat: 升级深度学习模型为 Temporal Fusion Transformer 架构

- 将 LSTMMLPRegressor 重构为 TemporalFusionRegressor,采用 Transformer Encoder 替代 LSTM
- 新增 LearnedAttentionPooling 和 GatedResidualBlock 模块增强模型表达能力
- 优化训练策略,使用 OneCycleLR 调度器和样本加权机制
- 改进缺勤事件采样算法,基于压力、健康、家庭等维度更精确地计算缺勤时长
- 更新 .gitignore 排除原始数据文件,删除不再使用的原始 CSV 文件
This commit is contained in:
2026-03-20 16:30:08 +08:00
parent ff0fbf96f7
commit 77e38fd15b
6 changed files with 225 additions and 12835 deletions

View File

@@ -55,8 +55,11 @@ STATIC_FEATURES = [
'岗位稳定性指数',
]
DEFAULT_EPOCHS = 80
DEFAULT_BATCH_SIZE = 256
EARLY_STOPPING_PATIENCE = 12
DEFAULT_BATCH_SIZE = 128
EARLY_STOPPING_PATIENCE = 16
TRANSFORMER_D_MODEL = 160
TRANSFORMER_HEADS = 5
TRANSFORMER_LAYERS = 3
BaseTorchModule = nn.Module if nn is not None else object
@@ -90,7 +93,46 @@ class SequenceStaticDataset(Dataset):
)
class LSTMMLPRegressor(BaseTorchModule):
class LearnedAttentionPooling(BaseTorchModule):
    """Pool a sequence of hidden states into a single vector via learned attention.

    A small two-layer scorer assigns one scalar per timestep; softmax over
    the time axis converts the scores into weights, and the output is the
    weighted sum of the timestep representations.
    """

    def __init__(self, hidden_dim: int):
        super().__init__()
        # Additive-attention scorer: hidden -> hidden -> scalar per timestep.
        self.score = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, 1),
        )

    def forward(self, sequence_x: torch.Tensor) -> torch.Tensor:
        """Collapse the time dimension of ``sequence_x`` with attention weights."""
        # (batch, time, 1) -> (batch, time) raw scores.
        raw_scores = self.score(sequence_x).squeeze(-1)
        # Normalise over the time dimension (dim=1).
        weights = torch.softmax(raw_scores, dim=1)
        # Broadcast weights over the hidden dimension and sum out time.
        return (sequence_x * weights.unsqueeze(-1)).sum(dim=1)
class GatedResidualBlock(BaseTorchModule):
    """Residual MLP block with a learned sigmoid gate.

    The input is projected to ``hidden_dim`` to form the residual path
    (identity when the sizes already match), transformed by a small
    LayerNorm/GELU MLP, and the two paths are blended through a gate
    computed from their concatenation before a final LayerNorm.
    """

    def __init__(self, input_dim: int, hidden_dim: int, dropout: float = 0.15):
        super().__init__()
        # Residual projection; skip the extra Linear when dimensions agree.
        if input_dim != hidden_dim:
            self.proj = nn.Linear(input_dim, hidden_dim)
        else:
            self.proj = nn.Identity()
        # Non-linear transform path.
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, hidden_dim),
        )
        # Gate conditioned on both the residual and transformed paths.
        self.gate = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.Sigmoid(),
        )
        self.out_norm = nn.LayerNorm(hidden_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the gated residual transform; the last dim becomes ``hidden_dim``."""
        skip = self.proj(x)
        candidate = self.net(x)
        mix = self.gate(torch.cat((skip, candidate), dim=-1))
        # Gate scales only the transformed path before the residual add.
        return self.out_norm(skip + candidate * mix)
class TemporalFusionRegressor(BaseTorchModule):
def __init__(
self,
seq_num_dim: int,
@@ -110,43 +152,57 @@ class LSTMMLPRegressor(BaseTorchModule):
static_cat_dim = sum(embedding.embedding_dim for embedding in self.static_cat_embeddings)
seq_input_dim = seq_num_dim + seq_cat_dim
static_input_dim = static_num_dim + static_cat_dim
self.position_embedding = nn.Parameter(torch.randn(WINDOW_SIZE, TRANSFORMER_D_MODEL) * 0.02)
self.seq_projection = nn.Sequential(
nn.Linear(seq_input_dim, 128),
nn.LayerNorm(128),
nn.Linear(seq_input_dim, TRANSFORMER_D_MODEL),
nn.LayerNorm(TRANSFORMER_D_MODEL),
nn.GELU(),
nn.Dropout(0.15),
nn.Dropout(0.12),
)
self.lstm = nn.LSTM(
input_size=128,
hidden_size=96,
num_layers=2,
encoder_layer = nn.TransformerEncoderLayer(
d_model=TRANSFORMER_D_MODEL,
nhead=TRANSFORMER_HEADS,
dim_feedforward=TRANSFORMER_D_MODEL * 3,
dropout=0.15,
activation='gelu',
batch_first=True,
dropout=0.2,
bidirectional=True,
norm_first=True,
)
self.sequence_encoder = nn.TransformerEncoder(
encoder_layer,
num_layers=TRANSFORMER_LAYERS,
)
self.sequence_pool = LearnedAttentionPooling(TRANSFORMER_D_MODEL)
self.sequence_head = nn.Sequential(
nn.Linear(96 * 2 * 2, 128),
nn.Linear(TRANSFORMER_D_MODEL * 3, 192),
nn.LayerNorm(192),
nn.GELU(),
nn.Dropout(0.18),
nn.Linear(192, 128),
nn.GELU(),
nn.Dropout(0.2),
)
self.static_net = nn.Sequential(
nn.Linear(static_input_dim, 96),
nn.LayerNorm(96),
nn.GELU(),
nn.Dropout(0.15),
nn.Linear(96, 64),
nn.GELU(),
nn.Dropout(0.1),
GatedResidualBlock(static_input_dim, 128, dropout=0.15),
GatedResidualBlock(128, 96, dropout=0.12),
)
self.context_gate = nn.Sequential(
nn.Linear(128 + 96, 128 + 96),
nn.Sigmoid(),
)
self.fusion = nn.Sequential(
nn.Linear(128 + 64, 128),
nn.LayerNorm(128),
GatedResidualBlock(128 + 96, 160, dropout=0.18),
nn.Dropout(0.12),
nn.Linear(160, 96),
nn.GELU(),
nn.Dropout(0.2),
nn.Linear(128, 64),
nn.Dropout(0.08),
nn.Linear(96, 1),
)
self.shortcut_head = nn.Sequential(
nn.Linear(seq_num_dim + static_num_dim, 64),
nn.LayerNorm(64),
nn.GELU(),
nn.Dropout(0.1),
nn.Dropout(0.08),
nn.Linear(64, 1),
)
@@ -163,11 +219,12 @@ class LSTMMLPRegressor(BaseTorchModule):
seq_parts.append(seq_embedded)
seq_input = torch.cat(seq_parts, dim=-1)
seq_input = self.seq_projection(seq_input)
lstm_output, _ = self.lstm(seq_input)
sequence_last = lstm_output[:, -1, :]
sequence_mean = lstm_output.mean(dim=1)
sequence_repr = self.sequence_head(torch.cat([sequence_last, sequence_mean], dim=1))
seq_input = seq_input + self.position_embedding.unsqueeze(0)
sequence_context = self.sequence_encoder(seq_input)
sequence_last = sequence_context[:, -1, :]
sequence_mean = sequence_context.mean(dim=1)
sequence_attended = self.sequence_pool(sequence_context)
sequence_repr = self.sequence_head(torch.cat([sequence_last, sequence_mean, sequence_attended], dim=1))
static_parts = [static_num_x]
static_embedded = self._embed_categorical(static_cat_x, self.static_cat_embeddings)
@@ -177,7 +234,13 @@ class LSTMMLPRegressor(BaseTorchModule):
static_repr = self.static_net(static_input)
fused = torch.cat([sequence_repr, static_repr], dim=1)
return self.fusion(fused).squeeze(1)
fused = fused * self.context_gate(fused)
shortcut = self.shortcut_head(torch.cat([seq_num_x[:, -1, :], static_num_x], dim=1))
return (self.fusion(fused) + shortcut).squeeze(1)
class LSTMMLPRegressor(TemporalFusionRegressor):
    # Backward-compatibility alias: external callers and saved bundles may
    # still reference the old LSTM-based class name, but the implementation
    # is now the Transformer-based TemporalFusionRegressor.
    pass
def is_available() -> bool:
@@ -413,6 +476,15 @@ def _evaluate_model(
return metrics['rmse'], metrics
def _compute_sample_weights(targets: torch.Tensor, target_transform: str) -> torch.Tensor:
if target_transform == 'log1p':
base_targets = torch.expm1(targets)
else:
base_targets = targets
normalized = torch.clamp(base_targets / 12.0, min=0.0, max=2.0)
return 1.0 + normalized * 0.8
def train_lstm_mlp(
train_df: pd.DataFrame,
test_df: pd.DataFrame,
@@ -455,29 +527,35 @@ def train_lstm_mlp(
else:
print('[lstm_mlp] Training device: CPU')
model = LSTMMLPRegressor(
model = TemporalFusionRegressor(
seq_num_dim=train_seq_num.shape[-1],
static_num_dim=train_static_num.shape[-1],
seq_cat_cardinalities=[len(category_maps[feature]) + 1 for feature in feature_layout['seq_cat_features']],
static_cat_cardinalities=[len(category_maps[feature]) + 1 for feature in feature_layout['static_cat_features']],
).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=0.0012, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.6, patience=4, min_lr=1e-5
)
criterion = nn.SmoothL1Loss(beta=0.35)
optimizer = torch.optim.AdamW(model.parameters(), lr=9e-4, weight_decay=3e-4)
criterion = nn.SmoothL1Loss(beta=0.28, reduction='none')
train_loader = DataLoader(
SequenceStaticDataset(train_seq_num, train_seq_cat, train_static_num, train_static_cat, y_train),
batch_size=batch_size,
shuffle=True,
drop_last=False,
)
val_loader = DataLoader(
SequenceStaticDataset(val_seq_num, val_seq_cat, val_static_num, val_static_cat, y_val),
batch_size=batch_size,
shuffle=False,
)
total_steps = max(20, epochs * max(1, len(train_loader)))
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=0.0014,
total_steps=total_steps,
pct_start=0.12,
div_factor=12.0,
final_div_factor=40.0,
)
best_state = None
best_metrics = None
@@ -496,15 +574,17 @@ def train_lstm_mlp(
optimizer.zero_grad(set_to_none=True)
predictions = model(batch_seq_num, batch_seq_cat, batch_static_num, batch_static_cat)
sample_weights = _compute_sample_weights(batch_target, target_transform)
loss = criterion(predictions, batch_target)
loss = (loss * sample_weights).mean()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
scheduler.step()
running_loss += float(loss.item()) * len(batch_target)
train_loss = running_loss / max(1, len(train_loader.dataset))
val_rmse, val_metrics = _evaluate_model(model, val_loader, device, target_transform)
scheduler.step(val_rmse)
improved = val_rmse + 1e-4 < best_val_rmse
if improved:
@@ -554,6 +634,7 @@ def train_lstm_mlp(
bundle = {
'state_dict': model.state_dict(),
'architecture': 'temporal_fusion_transformer',
'window_size': WINDOW_SIZE,
'target_transform': target_transform,
'feature_layout': feature_layout,
@@ -583,6 +664,7 @@ def train_lstm_mlp(
'sequence_window_size': WINDOW_SIZE,
'sequence_feature_names': SEQUENCE_FEATURES,
'static_feature_names': STATIC_FEATURES,
'deep_learning_architecture': 'temporal_fusion_transformer',
'deep_validation_r2': round(float(best_metrics['r2']), 4) if best_metrics else None,
},
}

View File

@@ -161,68 +161,109 @@ def sample_event(rng, employee):
weekday = int(rng.integers(1, 8))
near_holiday = int(rng.random() < (0.3 if month in [1, 2, 4, 5, 9, 10] else 0.16))
leave_type_items = ['病假', '事假', '年假', '调休', '婚假', '丧假', '产检育儿假', '工伤假', '其他']
leave_type = weighted_choice(rng, leave_type_items, [0.3, 0.22, 0.12, 0.14, 0.03, 0.02, 0.06, 0.02, 0.09])
if employee['子女数量'] > 0 and rng.random() < 0.14:
reason_category = '子女照护'
leave_probs = [0.26, 0.22, 0.11, 0.14, 0.03, 0.02, 0.07, 0.03, 0.12]
if employee['是否慢性病史'] == 1 or employee['年度体检异常标记'] == 1:
leave_probs = [0.34, 0.18, 0.08, 0.1, 0.02, 0.02, 0.08, 0.04, 0.14]
elif employee['子女数量'] >= 2:
leave_probs = [0.22, 0.24, 0.1, 0.12, 0.03, 0.02, 0.12, 0.02, 0.13]
leave_type = weighted_choice(rng, leave_type_items, leave_probs)
if leave_type in ['病假', '工伤假']:
reason_category = weighted_choice(rng, ['身体不适', '就医复查', '职业疲劳'], [0.52, 0.3, 0.18])
elif leave_type == '产检育儿假':
reason_category = weighted_choice(rng, ['子女照护', '家庭事务', '身体不适'], [0.6, 0.25, 0.15])
elif leave_type in ['婚假', '丧假']:
reason_category = weighted_choice(rng, ['家庭事务', '突发事件'], [0.72, 0.28])
elif leave_type in ['年假', '调休']:
reason_category = weighted_choice(rng, ['职业疲劳', '家庭事务', '交通受阻'], [0.52, 0.28, 0.2])
else:
reason_category = weighted_choice(
rng,
['身体不适', '家庭事务', '交通受阻', '突发事件', '职业疲劳', '就医复查'],
[0.28, 0.19, 0.09, 0.11, 0.2, 0.13],
['身体不适', '家庭事务', '子女照护', '交通受阻', '突发事件', '职业疲劳'],
[0.2, 0.22, 0.14, 0.12, 0.12, 0.2],
)
medical_certificate = int(leave_type in ['病假', '工伤假'] or reason_category in ['身体不适', '就医复查'])
urgent_leave = int(rng.random() < (0.45 if leave_type in ['病假', '事假', '工伤假'] else 0.18))
continuous_absence = int(rng.random() < (0.2 if leave_type in ['病假', '产检育儿假', '工伤假'] else 0.08))
previous_overtime = int(rng.random() < min(0.85, employee['月均加班时长'] / 65))
medical_certificate = int(
leave_type in ['病假', '工伤假']
or reason_category in ['身体不适', '就医复查']
or (employee['是否慢性病史'] == 1 and leave_type == '其他')
)
urgent_leave = int(
leave_type in ['病假', '工伤假']
or reason_category in ['突发事件', '身体不适']
or (near_holiday == 0 and leave_type == '事假' and rng.random() < 0.35)
)
continuous_absence = int(
leave_type in ['病假', '工伤假', '产检育儿假']
and (employee['近90天缺勤次数'] >= 2 or employee['近180天请假总时长'] >= 28)
)
previous_overtime = int(
employee['月均加班时长'] >= 30
or (employee['月均加班时长'] >= 24 and weekday in [1, 2, 5])
or (employee['是否夜班岗位'] == 1 and rng.random() < 0.65)
)
season = season_from_month(month)
channel = weighted_choice(rng, ['系统申请', '主管代提', '临时电话报备'], [0.68, 0.18, 0.14])
base = 0.95
base += min(employee['月均加班时长'] / 28, 1.8)
base += min(employee['通勤时长分钟'] / 65, 1.2)
base += employee['是否夜班岗位'] * 0.9
base += employee['是否慢性病史'] * 1.25
base += employee['年度体检异常标记'] * 0.6
base += 0.35 * employee['子女数量']
base += 0.5 if employee['心理压力等级'] == '' else (0.2 if employee['心理压力等级'] == '' else -0.1)
base += 0.4 if employee['是否跨城通勤'] else 0
base += 0.35 if previous_overtime else 0
base += 0.35 if near_holiday else 0
base += 0.3 if continuous_absence else 0
base += 0.3 if employee['近90天缺勤次数'] >= 3 else 0
base -= 0.35 if employee['绩效等级'] == 'A' else (0.15 if employee['绩效等级'] == 'B' else 0)
base -= min(employee['司龄年数'] / 40, 0.5)
base -= min(employee['每周运动频次'] * 0.08, 0.3)
base -= 0.2 if employee['近30天睡眠时长均值'] >= 7.5 else 0
pressure_score = (
employee['月均加班时长'] * 0.032
+ employee['通勤时长分钟'] * 0.018
+ employee['是否夜班岗位'] * 0.75
+ employee['是否跨城通勤'] * 0.32
+ previous_overtime * 0.35
)
health_score = (
employee['是否慢性病史'] * 1.2
+ employee['年度体检异常标记'] * 0.55
+ (employee['BMI'] >= 28) * 0.3
+ (employee['近30天睡眠时长均值'] < 6.4) * 0.45
)
family_score = employee['子女数量'] * 0.18 + employee['是否独生子女家庭负担'] * 0.28
resilience_score = (
(0.55 if employee['绩效等级'] == 'A' else 0.25 if employee['绩效等级'] == 'B' else 0.0)
+ min(employee['司龄年数'] / 26, 0.65)
+ min(employee['每周运动频次'] * 0.06, 0.25)
)
base = 0.35
base += pressure_score
base += health_score
base += family_score
base += 0.4 if employee['心理压力等级'] == '' else (0.18 if employee['心理压力等级'] == '' else -0.05)
base += 0.18 if near_holiday else 0.0
base += 0.35 if continuous_absence else 0.0
base += 0.28 if employee['近90天缺勤次数'] >= 3 else 0.0
base += 0.18 if employee['近180天请假总时长'] >= 36 else 0.0
base -= resilience_score
leave_bonus = {
'病假': 2.0,
'病假': 2.1,
'事假': 0.8,
'年假': 0.1,
'年假': 0.15,
'调休': 0.1,
'婚假': 3.0,
'婚假': 3.1,
'丧假': 2.8,
'产检育儿假': 2.4,
'工伤假': 3.8,
'其他': 0.5,
'产检育儿假': 2.35,
'工伤假': 3.9,
'其他': 0.55,
}
reason_bonus = {
'身体不适': 1.0,
'家庭事务': 0.5,
'子女照护': 0.8,
'家庭事务': 0.55,
'子女照护': 0.75,
'交通受阻': 0.2,
'突发事件': 0.6,
'职业疲劳': 0.7,
'就医复查': 1.2,
'就医复查': 1.15,
}
industry_bonus = {
'制造业': 0.35,
'互联网': 0.2,
'零售连锁': 0.25,
'物流运输': 0.4,
'金融服务': 0.1,
'医药健康': 0.2,
'建筑工程': 0.35,
'制造业': 0.42,
'互联网': 0.22,
'零售连锁': 0.28,
'物流运输': 0.5,
'金融服务': 0.12,
'医药健康': 0.24,
'建筑工程': 0.4,
}
season_bonus = {1: 0.35, 2: 0.0, 3: 0.15, 4: 0.05}
weekday_bonus = {1: 0.05, 2: 0.0, 3: 0.0, 4: 0.05, 5: 0.15, 6: 0.25, 7: 0.3}
@@ -233,18 +274,21 @@ def sample_event(rng, employee):
duration += industry_bonus[employee['所属行业']]
duration += season_bonus[season]
duration += weekday_bonus[weekday]
duration += 0.55 if medical_certificate else 0
duration += 0.4 if urgent_leave else -0.05
duration += rng.normal(0, 0.9)
duration += 0.55 if medical_certificate else 0.0
duration += 0.28 if urgent_leave else -0.06
if leave_type in ['', '丧假', '工伤假'] and rng.random() < 0.5:
duration += rng.uniform(1.5, 5)
if leave_type == '' and employee['是否慢性病史'] == 1 and rng.random() < 0.35:
duration += rng.uniform(1, 4)
if leave_type == '' and employee['是否慢性病史'] == 1:
duration += 0.85
if leave_type == '工伤':
duration += 1.0 + employee['是否夜班岗位'] * 0.3
if leave_type in ['婚假', '丧假']:
duration += 0.7 + 0.18 * near_holiday
if leave_type == '产检育儿假':
duration += 0.55 + employee['子女数量'] * 0.12
if leave_type in ['年假', '调休']:
duration *= rng.uniform(0.7, 0.95)
duration *= 0.82 if near_holiday == 0 else 0.9
duration = round(float(np.clip(duration, 0.5, 24.0)), 1)
duration = round(float(np.clip(duration + rng.normal(0, 0.35), 0.5, 18.0)), 1)
event = employee.copy()
event.update({