feat: 初始化员工缺勤分析系统项目
搭建完整的前后端分离架构,实现数据概览、预测分析、聚类分析等核心功能模块 详细版: feat: 初始化员工缺勤分析系统项目 - 后端:基于 Flask 搭建 RESTful API,包含数据概览、特征分析、预测模型、聚类分析四大模块 - 前端:基于 Vue.js 构建单页应用,实现 Dashboard、预测、聚类、因子分析等页面 - 模型:集成随机森林、XGBoost、LightGBM、Stacking 等多种机器学习模型 - 文档:完成需求规格说明、系统架构设计、接口设计、数据设计、UI原型设计等文档
This commit is contained in:
4
backend/core/__init__.py
Normal file
4
backend/core/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from .preprocessing import DataPreprocessor, get_clean_data, save_clean_data
|
||||
from .feature_mining import calculate_correlation, get_correlation_for_heatmap, group_comparison
|
||||
from .train_model import OptimizedModelTrainer, train_and_save_models
|
||||
from .clustering import KMeansAnalyzer, kmeans_analyzer
|
||||
229
backend/core/clustering.py
Normal file
229
backend/core/clustering.py
Normal file
@@ -0,0 +1,229 @@
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from sklearn.cluster import KMeans
|
||||
from sklearn.preprocessing import MinMaxScaler
|
||||
import joblib
|
||||
import os
|
||||
|
||||
import config
|
||||
from core.preprocessing import get_clean_data
|
||||
|
||||
|
||||
class KMeansAnalyzer:
    """K-Means clustering over a small set of numeric employee features.

    Features (age, service time, workload, BMI, absenteeism hours) are
    min-max scaled to [0, 1] before clustering; results are shaped as
    JSON-friendly dicts for the front-end charts.
    """

    def __init__(self, n_clusters=3):
        # Requested number of clusters for the next fit().
        self.n_clusters = n_clusters
        # Fitted sklearn KMeans model (None until fit() has run).
        self.model = None
        self.scaler = MinMaxScaler()
        # Raw feature matrix, its scaled copy, and per-row cluster labels.
        self.data = None
        self.data_scaled = None
        self.labels = None

    def _get_feature_columns(self, df):
        """Resolve canonical feature names to the dataframe's actual columns.

        Matching ignores spaces and letter case so that raw names like
        'Work load Average/day ' still resolve.  NOTE: mutates df.columns
        in place (strips surrounding whitespace).
        """
        df.columns = [col.strip() for col in df.columns]

        # Canonical name -> matched column (None = not found in df).
        feature_map = {
            'Age': None,
            'Service time': None,
            'Work load Average/day': None,
            'Body mass index': None,
            'Absenteeism time in hours': None
        }

        for key in feature_map:
            if key in df.columns:
                feature_map[key] = key
            else:
                # Fuzzy fallback: compare with spaces removed, case-folded.
                for col in df.columns:
                    if key.replace(' ', '').lower() == col.replace(' ', '').lower():
                        feature_map[key] = col
                        break

        actual_features = [v for v in feature_map.values() if v is not None]
        return actual_features

    def fit(self, n_clusters=None):
        """Fit KMeans on the clean dataset and return the fitted model.

        Passing n_clusters overrides (and persists) the instance setting.
        """
        if n_clusters:
            self.n_clusters = n_clusters

        df = get_clean_data()
        df = df.reset_index(drop=True)

        feature_cols = self._get_feature_columns(df)

        # Fallback feature set if fuzzy matching found nothing at all.
        if not feature_cols:
            feature_cols = ['Age', 'Service time', 'Body mass index', 'Absenteeism time in hours']
            feature_cols = [c for c in feature_cols if c in df.columns]

        self.data = df[feature_cols].values

        # Fresh scaler each fit so ranges from a previous fit never leak in.
        self.scaler = MinMaxScaler()
        self.data_scaled = self.scaler.fit_transform(self.data)

        self.model = KMeans(
            n_clusters=self.n_clusters,
            random_state=config.RANDOM_STATE,
            n_init=10
        )

        self.labels = self.model.fit_predict(self.data_scaled)

        return self.model

    def get_cluster_results(self, n_clusters=3):
        """Per-cluster summary: size, share, center (original units), name.

        Refits lazily when no model exists or the cluster count changed.
        """
        if self.model is None or self.n_clusters != n_clusters:
            self.fit(n_clusters)

        # Centers mapped back to original (unscaled) feature units.
        centers = self.scaler.inverse_transform(self.model.cluster_centers_)

        unique, counts = np.unique(self.labels, return_counts=True)
        total = len(self.labels)

        cluster_names = self._generate_cluster_names(centers)

        feature_cols = self._get_feature_columns(get_clean_data())

        clusters = []
        for i, (cluster_id, count) in enumerate(zip(unique, counts)):
            center_dict = {}
            for j, fname in enumerate(feature_cols):
                # Guard against a center shorter than the resolved columns.
                if j < len(centers[i]):
                    center_dict[fname] = round(centers[i][j], 2)

            clusters.append({
                'id': int(cluster_id),
                'name': cluster_names.get(cluster_id, f'群体{cluster_id+1}'),
                'member_count': int(count),
                'percentage': round(count / total * 100, 1),
                'center': center_dict,
                'description': self._generate_description(cluster_names.get(cluster_id, ''))
            })

        return {
            'n_clusters': self.n_clusters,
            'clusters': clusters
        }

    def get_cluster_profile(self, n_clusters=3):
        """Radar-chart payload: cluster centers in *scaled* [0, 1] units."""
        if self.model is None or self.n_clusters != n_clusters:
            self.fit(n_clusters)

        centers_scaled = self.model.cluster_centers_

        df = get_clean_data()
        df.columns = [col.strip() for col in df.columns]
        feature_cols = self._get_feature_columns(df)

        # Chinese axis labels, truncated to the features actually resolved.
        dimensions = ['年龄', '工龄', '工作负荷', 'BMI', '缺勤倾向'][:len(feature_cols)]

        # Names are derived from centers in original units.
        cluster_names = self._generate_cluster_names(
            self.scaler.inverse_transform(centers_scaled)
        )

        clusters = []
        for i in range(self.n_clusters):
            clusters.append({
                'id': i,
                'name': cluster_names.get(i, f'群体{i+1}'),
                'values': [round(v, 2) for v in centers_scaled[i]]
            })

        return {
            'dimensions': dimensions,
            'dimension_keys': feature_cols,
            'clusters': clusters
        }

    def get_scatter_data(self, n_clusters=3, x_axis='Age', y_axis='Absenteeism time in hours'):
        """Scatter payload: one point per employee, colored by cluster.

        Axis columns are resolved by substring match (spaces/case ignored);
        falls back to the first/last dataframe column when nothing matches.
        NOTE(review): assumes the clean data has an 'ID' column — confirm.
        """
        if self.model is None or self.n_clusters != n_clusters:
            self.fit(n_clusters)

        df = get_clean_data()
        df = df.reset_index(drop=True)
        df.columns = [col.strip() for col in df.columns]

        x_col = None
        y_col = None

        # No break: if several columns match, the last one wins.
        for col in df.columns:
            if x_axis.replace(' ', '').lower() in col.replace(' ', '').lower():
                x_col = col
            if y_axis.replace(' ', '').lower() in col.replace(' ', '').lower():
                y_col = col

        if x_col is None:
            x_col = df.columns[0]
        if y_col is None:
            y_col = df.columns[-1]

        points = []
        # min() guards against a row-count mismatch between df and labels.
        for idx in range(min(len(df), len(self.labels))):
            row = df.iloc[idx]
            points.append({
                'employee_id': int(row['ID']),
                'x': float(row[x_col]),
                'y': float(row[y_col]),
                'cluster_id': int(self.labels[idx])
            })

        # Fixed palette for up to five clusters; string keys for JSON.
        cluster_colors = {
            '0': '#67C23A',
            '1': '#E6A23C',
            '2': '#F56C6C',
            '3': '#909399',
            '4': '#409EFF'
        }

        return {
            'x_axis': x_col,
            'x_axis_name': config.FEATURE_NAME_CN.get(x_col, x_col),
            'y_axis': y_col,
            'y_axis_name': config.FEATURE_NAME_CN.get(y_col, y_col),
            'points': points[:500],  # cap payload size for the front-end
            'cluster_colors': cluster_colors
        }

    def _generate_cluster_names(self, centers):
        """Heuristic Chinese labels from *unscaled* center coordinates.

        Relies on the positional order produced by _get_feature_columns:
        [Age, Service time, Work load, BMI, Absenteeism hours].
        """
        names = {}

        for i, center in enumerate(centers):
            if len(center) >= 5:
                service_time = center[1]
                work_load = center[2]
                bmi = center[3]
                absent = center[4]
            else:
                # Shorter centers: presumably the workload column was
                # missing, so later values shift left — TODO confirm.
                service_time = center[1] if len(center) > 1 else 0
                work_load = 0
                bmi = center[2] if len(center) > 2 else 0
                absent = center[3] if len(center) > 3 else 0

            # Hand-tuned thresholds, not learned from the data.
            if service_time > 15 and absent < 3:
                names[i] = '模范型员工'
            elif work_load > 260 and absent > 5:
                names[i] = '压力型员工'
            elif bmi > 28:
                names[i] = '生活习惯型员工'
            else:
                names[i] = f'群体{i+1}'

        return names

    def _generate_description(self, name):
        """One-line Chinese description for a generated cluster name."""
        descriptions = {
            '模范型员工': '工龄长、工作稳定、缺勤率低',
            '压力型员工': '工作负荷大、缺勤较多',
            '生活习惯型员工': 'BMI偏高、需关注健康'
        }
        return descriptions.get(name, '常规员工群体')

    def save_model(self):
        """Persist the fitted KMeans model to config.KMEANS_MODEL_PATH."""
        os.makedirs(config.MODELS_DIR, exist_ok=True)
        joblib.dump(self.model, config.KMEANS_MODEL_PATH)

    def load_model(self):
        """Load a previously saved model from disk, if one exists."""
        if os.path.exists(config.KMEANS_MODEL_PATH):
            self.model = joblib.load(config.KMEANS_MODEL_PATH)
            self.n_clusters = self.model.n_clusters


# Module-level singleton shared by the API layer.
kmeans_analyzer = KMeansAnalyzer()
|
||||
151
backend/core/feature_mining.py
Normal file
151
backend/core/feature_mining.py
Normal file
@@ -0,0 +1,151 @@
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
import config
|
||||
from core.preprocessing import get_clean_data
|
||||
|
||||
|
||||
def calculate_correlation():
    """Pearson correlation matrix over all numeric columns, excluding ID."""
    frame = get_clean_data()

    # Keep every numeric column except the row identifier.
    cols = [c for c in frame.select_dtypes(include=[np.number]).columns
            if c != 'ID']

    return frame[cols].corr()
|
||||
|
||||
|
||||
def get_correlation_for_heatmap():
    """Correlation heatmap payload for a fixed set of key features.

    Returns {'features': [Chinese labels], 'matrix': [[rounded corr]]}.
    Features missing from the data are silently skipped.
    """
    corr_matrix = calculate_correlation()

    # BUG FIX: 'Work load Average/day ' previously carried a trailing space,
    # but load_raw_data() strips all column names, so the workload feature
    # never matched and was silently dropped from the heatmap.
    key_features = [
        'Age',
        'Service time',
        'Distance from Residence to Work',
        'Work load Average/day',
        'Body mass index',
        'Absenteeism time in hours'
    ]

    key_features = [f for f in key_features if f in corr_matrix.columns]

    sub_matrix = corr_matrix.loc[key_features, key_features]

    result = {
        'features': [config.FEATURE_NAME_CN.get(f, f) for f in key_features],
        'matrix': sub_matrix.values.round(2).tolist()
    }

    return result
|
||||
|
||||
|
||||
def calculate_feature_importance(model, feature_names):
    """Return (feature, importance) pairs sorted by importance, descending.

    Raises ValueError when the model exposes no feature_importances_.
    """
    if not hasattr(model, 'feature_importances_'):
        raise ValueError("Model does not have feature_importances_ attribute")

    importance_by_name = dict(zip(feature_names, model.feature_importances_))

    return sorted(importance_by_name.items(), key=lambda kv: kv[1], reverse=True)
|
||||
|
||||
|
||||
def get_feature_importance_from_model(model_path, feature_names):
    """Load a persisted model from disk and rank its feature importances."""
    import joblib

    loaded = joblib.load(model_path)
    return calculate_feature_importance(loaded, feature_names)
|
||||
|
||||
|
||||
def group_comparison(dimension):
    """Compare average absenteeism hours between groups on one dimension.

    dimension: 'drinker', 'smoker', 'education', 'children' or 'pet'.
    Returns per-group stats plus the absolute/relative difference between
    the first two groups.  Raises ValueError for an unknown dimension.
    """
    df = get_clean_data()

    # dimension -> (column, value->label map[, membership predicate, label]).
    # 'children'/'pet' carry two extra entries for a binary has/has-not split.
    dimension_map = {
        'drinker': ('Social drinker', {0: '不饮酒', 1: '饮酒'}),
        'smoker': ('Social smoker', {0: '不吸烟', 1: '吸烟'}),
        'education': ('Education', {1: '高中', 2: '本科', 3: '研究生', 4: '博士'}),
        'children': ('Son', {0: '无子女'}, lambda x: x > 0, '有子女'),
        'pet': ('Pet', {0: '无宠物'}, lambda x: x > 0, '有宠物')
    }

    if dimension not in dimension_map:
        raise ValueError(f"Invalid dimension: {dimension}")

    col, value_map = dimension_map[dimension][0], dimension_map[dimension][1]

    if dimension in ['children', 'pet']:
        # Binary split: exactly zero vs. greater than zero.
        threshold_fn = dimension_map[dimension][2]
        other_label = dimension_map[dimension][3]

        groups = []
        for val in [0]:
            group_df = df[df[col] == val]
            if len(group_df) > 0:
                groups.append({
                    'name': value_map.get(val, str(val)),
                    'value': val,
                    'avg_hours': round(group_df['Absenteeism time in hours'].mean(), 2),
                    'count': len(group_df),
                    'percentage': round(len(group_df) / len(df) * 100, 1)
                })

        # The ">0" group, selected via the predicate.
        group_df = df[df[col].apply(threshold_fn)]
        if len(group_df) > 0:
            groups.append({
                'name': other_label,
                'value': 1,
                'avg_hours': round(group_df['Absenteeism time in hours'].mean(), 2),
                'count': len(group_df),
                'percentage': round(len(group_df) / len(df) * 100, 1)
            })
    else:
        # One group per distinct value, in ascending value order.
        groups = []
        for val in sorted(df[col].unique()):
            group_df = df[df[col] == val]
            if len(group_df) > 0:
                groups.append({
                    'name': value_map.get(val, str(val)),
                    'value': int(val),
                    'avg_hours': round(group_df['Absenteeism time in hours'].mean(), 2),
                    'count': len(group_df),
                    'percentage': round(len(group_df) / len(df) * 100, 1)
                })

    # Difference is computed between the first two groups only
    # (education may have more than two).
    if len(groups) >= 2:
        diff_value = abs(groups[0]['avg_hours'] - groups[1]['avg_hours'])
        base = min(groups[0]['avg_hours'], groups[1]['avg_hours'])
        diff_percentage = round(diff_value / base * 100, 1) if base > 0 else 0
    else:
        diff_value = 0
        diff_percentage = 0

    return {
        'dimension': dimension,
        'dimension_name': {
            'drinker': '饮酒习惯',
            'smoker': '吸烟习惯',
            'education': '学历',
            'children': '子女',
            'pet': '宠物'
        }.get(dimension, dimension),
        'groups': groups,
        'difference': {
            'value': diff_value,
            'percentage': diff_percentage
        }
    }
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test for the analysis helpers.
    print("Correlation matrix:")
    print(get_correlation_for_heatmap())

    print("\nGroup comparison (drinker):")
    print(group_comparison('drinker'))
|
||||
105
backend/core/preprocessing.py
Normal file
105
backend/core/preprocessing.py
Normal file
@@ -0,0 +1,105 @@
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from sklearn.preprocessing import StandardScaler
|
||||
import joblib
|
||||
import os
|
||||
|
||||
import config
|
||||
|
||||
|
||||
class DataPreprocessor:
    """Loads, cleans and standard-scales the raw absenteeism dataset.

    Cleaning = drop duplicate rows + impute missing values; scaling uses
    a StandardScaler fitted on the feature columns (target excluded).
    """

    def __init__(self):
        self.scaler = StandardScaler()
        # True once fit_transform() or load_preprocessor() has run.
        self.is_fitted = False
        self.feature_names = None

    def load_raw_data(self):
        """Read the raw CSV and strip whitespace from column names."""
        df = pd.read_csv(config.RAW_DATA_PATH, sep=config.CSV_SEPARATOR)
        df.columns = df.columns.str.strip()
        return df

    def clean_data(self, df):
        """Drop duplicate rows and impute missing values.

        Numeric columns are filled with their median, everything else with
        the mode.  Operates on a copy; the input frame is not modified.
        """
        df = df.copy()

        df = df.drop_duplicates()

        for col in df.columns:
            if df[col].isnull().sum() > 0:
                # FIX: assign back instead of fillna(inplace=True) on the
                # column slice — chained-assignment inplace fills are
                # unreliable and deprecated in modern pandas.
                if df[col].dtype in ['int64', 'float64']:
                    df[col] = df[col].fillna(df[col].median())
                else:
                    df[col] = df[col].fillna(df[col].mode()[0])

        return df

    def fit_transform(self, df):
        """Clean df, fit the scaler on its features and return (X, y).

        y is None when the target column is absent.
        """
        df = self.clean_data(df)

        if 'Absenteeism time in hours' in df.columns:
            y = df['Absenteeism time in hours'].values
            feature_df = df.drop(columns=['Absenteeism time in hours'])
        else:
            y = None
            feature_df = df

        self.feature_names = list(feature_df.columns)

        X = feature_df.values

        X = self.scaler.fit_transform(X)

        self.is_fitted = True

        return X, y

    def transform(self, df):
        """Clean df and scale it with the already-fitted scaler.

        Raises ValueError if fit_transform()/load_preprocessor() has not run.
        """
        if not self.is_fitted:
            raise ValueError("Preprocessor has not been fitted yet.")

        df = self.clean_data(df)

        if 'Absenteeism time in hours' in df.columns:
            feature_df = df.drop(columns=['Absenteeism time in hours'])
        else:
            feature_df = df

        X = feature_df.values
        X = self.scaler.transform(X)

        return X

    def save_preprocessor(self):
        """Persist the fitted scaler and feature-name list to disk."""
        os.makedirs(config.MODELS_DIR, exist_ok=True)
        joblib.dump(self.scaler, config.SCALER_PATH)
        joblib.dump(self.feature_names, os.path.join(config.MODELS_DIR, 'feature_names.pkl'))

    def load_preprocessor(self):
        """Restore a previously saved scaler (and names, if present)."""
        self.scaler = joblib.load(config.SCALER_PATH)
        feature_names_path = os.path.join(config.MODELS_DIR, 'feature_names.pkl')
        if os.path.exists(feature_names_path):
            self.feature_names = joblib.load(feature_names_path)
        self.is_fitted = True
||||
|
||||
|
||||
def get_clean_data():
    """Return the raw dataset after cleaning (deduped, imputed; unscaled)."""
    pre = DataPreprocessor()
    return pre.clean_data(pre.load_raw_data())
|
||||
|
||||
|
||||
def save_clean_data():
    """Clean the raw dataset, persist it as CSV, and return the frame.

    Reuses get_clean_data() so the load+clean pipeline is defined in
    exactly one place (previously duplicated here).
    """
    df = get_clean_data()

    os.makedirs(config.PROCESSED_DATA_DIR, exist_ok=True)
    df.to_csv(config.CLEAN_DATA_PATH, index=False, sep=',')

    return df
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Regenerate the processed CSV and show a quick summary.
    cleaned = save_clean_data()
    print(f"Clean data saved. Shape: {cleaned.shape}")
    print(cleaned.head())
|
||||
590
backend/core/train_model.py
Normal file
590
backend/core/train_model.py
Normal file
@@ -0,0 +1,590 @@
|
||||
import sys
|
||||
import os
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import time
|
||||
from sklearn.ensemble import (
|
||||
RandomForestRegressor,
|
||||
GradientBoostingRegressor,
|
||||
ExtraTreesRegressor,
|
||||
StackingRegressor
|
||||
)
|
||||
from sklearn.linear_model import Ridge
|
||||
from sklearn.model_selection import train_test_split, RandomizedSearchCV
|
||||
from sklearn.preprocessing import RobustScaler, LabelEncoder
|
||||
from sklearn.feature_selection import SelectKBest, f_regression
|
||||
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
|
||||
import xgboost as xgb
|
||||
import lightgbm as lgb
|
||||
import joblib
|
||||
import warnings
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
import config
|
||||
from core.preprocessing import get_clean_data
|
||||
|
||||
|
||||
def print_training_log(model_name, start_time, best_score, best_params, n_iter, cv_folds):
    """Pretty-print a summary of one hyperparameter-search run."""
    separator = f" {'─'*50}"
    elapsed = time.time() - start_time

    lines = [
        separator,
        f" Model: {model_name}",
        f" Time: {elapsed:.1f}s",
        f" Best CV R2: {best_score:.4f}",
        " Best params:",
    ]
    lines += [f" - {k}: {v}" for k, v in best_params.items()]
    lines += [f" Iterations: {n_iter}, CV folds: {cv_folds}", separator]

    for line in lines:
        print(line)
|
||||
|
||||
|
||||
class DataAugmenter:
    """Enlarges the training set with noise-perturbed copies of the data."""

    def __init__(self, noise_level=0.02, n_augment=2):
        # Noise std as a fraction of each column's own std.
        self.noise_level = noise_level
        # Number of extra noisy full-dataset copies to append.
        self.n_augment = n_augment

    def augment(self, df, target_col='Absenteeism time in hours'):
        """Append n_augment noisy copies of df; the target is left untouched."""
        print(f"\nData Augmentation...")
        print(f" Original size: {len(df)}")

        augmented_dfs = [df]

        numerical_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        if target_col in numerical_cols:
            numerical_cols.remove(target_col)

        for i in range(self.n_augment):
            df_aug = df.copy()

            for col in numerical_cols:
                if col in df_aug.columns:
                    std_val = df_aug[col].std()
                    if std_val > 0:
                        # Gaussian noise scaled to the column's spread.
                        noise = np.random.normal(0, self.noise_level * std_val, len(df_aug))
                        df_aug[col] = df_aug[col] + noise

            augmented_dfs.append(df_aug)

        df_result = pd.concat(augmented_dfs, ignore_index=True)
        print(f" Augmented size: {len(df_result)}")

        return df_result

    def smote_regression(self, df, target_col='Absenteeism time in hours'):
        """SMOTE-like balancing for regression: oversample sparse target bins.

        Bins the target into zero/low/medium/high buckets and tops up each
        under-represented bin with jittered resamples until every bin
        matches the largest bin's size.
        """
        df = df.copy()
        y = df[target_col].values

        # Hour buckets: [0,1], (1,4], (4,8], (8,100] (include_lowest keeps 0).
        bins = [0, 1, 4, 8, 100]
        labels = ['zero', 'low', 'medium', 'high']
        df['_target_bin'] = pd.cut(y, bins=bins, labels=labels, include_lowest=True)

        bin_counts = df['_target_bin'].value_counts()
        max_count = bin_counts.max()

        numerical_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        if target_col in numerical_cols:
            numerical_cols.remove(target_col)
        if '_target_bin' in numerical_cols:
            numerical_cols.remove('_target_bin')

        augmented_rows = []
        for bin_label in labels:
            bin_df = df[df['_target_bin'] == bin_label].drop(columns=['_target_bin'])
            bin_size = len(bin_df)

            if bin_size < max_count and bin_size > 0:
                n_samples_to_add = max_count - bin_size

                for _ in range(n_samples_to_add):
                    # Resample a random row from the bin and jitter its
                    # numeric fields with small Gaussian noise.
                    idx = np.random.choice(bin_df.index)
                    sample = bin_df.loc[idx].copy()

                    for col in numerical_cols:
                        if col in sample.index:
                            std_val = bin_df[col].std()
                            if std_val > 0:
                                noise = np.random.normal(0, 0.02 * std_val)
                                sample[col] = sample[col] + noise

                    augmented_rows.append(sample)

        if augmented_rows:
            df_aug = pd.DataFrame(augmented_rows)
            df_result = pd.concat([df.drop(columns=['_target_bin']), df_aug], ignore_index=True)
        else:
            df_result = df.drop(columns=['_target_bin'])

        print(f" After SMOTE-like augmentation: {len(df_result)}")

        return df_result
|
||||
|
||||
|
||||
class OptimizedModelTrainer:
|
||||
def __init__(self):
|
||||
self.models = {}
|
||||
self.scaler = RobustScaler()
|
||||
self.feature_names = None
|
||||
self.selected_features = None
|
||||
self.label_encoders = {}
|
||||
self.model_metrics = {}
|
||||
self.augmenter = DataAugmenter(noise_level=0.02, n_augment=2)
|
||||
|
||||
def analyze_data(self, df):
|
||||
print("\n" + "="*60)
|
||||
print("Data Analysis")
|
||||
print("="*60)
|
||||
|
||||
y = df['Absenteeism time in hours']
|
||||
|
||||
print(f"\nTarget variable statistics:")
|
||||
print(f" Min: {y.min()}")
|
||||
print(f" Max: {y.max()}")
|
||||
print(f" Mean: {y.mean():.2f}")
|
||||
print(f" Median: {y.median():.2f}")
|
||||
print(f" Std: {y.std():.2f}")
|
||||
print(f" Skewness: {y.skew():.2f}")
|
||||
|
||||
print(f"\nTarget distribution:")
|
||||
print(f" Zero values: {(y == 0).sum()} ({(y == 0).sum() / len(y) * 100:.1f}%)")
|
||||
print(f" 1-8 hours: {((y > 0) & (y <= 8)).sum()} ({((y > 0) & (y <= 8)).sum() / len(y) * 100:.1f}%)")
|
||||
print(f" >8 hours: {(y > 8).sum()} ({(y > 8).sum() / len(y) * 100:.1f}%)")
|
||||
|
||||
return y
|
||||
|
||||
def clip_outliers(self, df, columns, lower_pct=1, upper_pct=99):
|
||||
df_clean = df.copy()
|
||||
|
||||
for col in columns:
|
||||
if col in df_clean.columns and df_clean[col].dtype in ['int64', 'float64']:
|
||||
if col == 'Absenteeism time in hours':
|
||||
continue
|
||||
lower = df_clean[col].quantile(lower_pct / 100)
|
||||
upper = df_clean[col].quantile(upper_pct / 100)
|
||||
df_clean[col] = df_clean[col].clip(lower, upper)
|
||||
|
||||
return df_clean
|
||||
|
||||
    def feature_engineering(self, df):
        """Derive ratio, flag, binned and interaction features.

        Returns a copy of df extended with engineered columns; no
        original columns are removed here.
        """
        df = df.copy()

        # Ratio features; the +1 in denominators avoids division by zero.
        df['workload_per_age'] = df['Work load Average/day'] / (df['Age'] + 1)
        df['expense_per_distance'] = df['Transportation expense'] / (df['Distance from Residence to Work'] + 1)
        df['age_service_ratio'] = df['Age'] / (df['Service time'] + 1)

        # Family-related flags and a combined responsibility count.
        df['has_children'] = (df['Son'] > 0).astype(int)
        df['has_pet'] = (df['Pet'] > 0).astype(int)
        df['family_responsibility'] = df['Son'] + df['Pet']

        # Health / lifestyle risk indicators.
        df['health_risk'] = ((df['Social drinker'] == 1) | (df['Social smoker'] == 1) | (df['Body mass index'] > 30)).astype(int)
        df['lifestyle_risk'] = df['Social drinker'].astype(int) + df['Social smoker'].astype(int)

        # Ordinal bucketings (category labels 1-4; out-of-range -> NaN,
        # later folded into a 'nan' label-encoder category in prepare_data).
        df['age_group'] = pd.cut(df['Age'], bins=[0, 30, 40, 50, 100], labels=[1, 2, 3, 4])
        df['service_group'] = pd.cut(df['Service time'], bins=[0, 5, 10, 20, 100], labels=[1, 2, 3, 4])
        df['bmi_category'] = pd.cut(df['Body mass index'], bins=[0, 18.5, 25, 30, 100], labels=[1, 2, 3, 4])

        df['workload_category'] = pd.cut(df['Work load Average/day'], bins=[0, 200, 250, 300, 500], labels=[1, 2, 3, 4])
        df['commute_category'] = pd.cut(df['Distance from Residence to Work'], bins=[0, 10, 20, 50, 100], labels=[1, 2, 3, 4])

        # Calendar flags — seasons 1/3 and weekdays 2/6 are marked risky;
        # presumably high-absence periods seen in the data (TODO confirm).
        df['seasonal_risk'] = df['Seasons'].apply(lambda x: 1 if x in [1, 3] else 0)
        df['weekday_risk'] = df['Day of the week'].apply(lambda x: 1 if x in [2, 6] else 0)

        # Normalized target-hit rate and an experience bucketing.
        df['hit_target_ratio'] = df['Hit target'] / 100
        df['experience_level'] = pd.cut(df['Service time'], bins=[0, 5, 10, 15, 100], labels=[1, 2, 3, 4])

        # Pairwise interactions, rescaled to keep magnitudes comparable.
        df['age_workload_interaction'] = df['Age'] * df['Work load Average/day'] / 10000
        df['service_bmi_interaction'] = df['Service time'] * df['Body mass index'] / 100

        return df
|
||||
|
||||
def select_features(self, X, y, k=20):
|
||||
print("\nFeature Selection...")
|
||||
|
||||
selector = SelectKBest(score_func=f_regression, k=min(k, X.shape[1]))
|
||||
selector.fit(X, y)
|
||||
|
||||
scores = selector.scores_
|
||||
feature_scores = list(zip(self.feature_names, scores))
|
||||
feature_scores.sort(key=lambda x: x[1], reverse=True)
|
||||
|
||||
print(f"\nTop {min(k, len(feature_scores))} features by F-score:")
|
||||
for i, (name, score) in enumerate(feature_scores[:min(k, len(feature_scores))]):
|
||||
cn = config.FEATURE_NAME_CN.get(name, name)
|
||||
print(f" {i+1}. {cn}: {score:.2f}")
|
||||
|
||||
selected_mask = selector.get_support()
|
||||
self.selected_features = [f for f, s in zip(self.feature_names, selected_mask) if s]
|
||||
|
||||
return selector.transform(X)
|
||||
|
||||
    def prepare_data(self):
        """Full training pipeline: clean -> clip -> augment -> engineer ->
        encode -> scale -> select -> split.

        Returns (X_train, X_test, y_train, y_test).
        NOTE(review): augmentation happens BEFORE the train/test split, so
        noisy copies of one row can land in both splits — reported test
        scores will be optimistic; confirm whether this is intended.
        """
        df = get_clean_data()
        df.columns = [col.strip() for col in df.columns]

        df = df.drop(columns=['ID'])

        # Weight/Height duplicate BMI; 'Reason for absence' risks leaking
        # the label.  (Prints once per dropped column.)
        cols_to_drop = ['Weight', 'Height', 'Reason for absence']
        for col in cols_to_drop:
            if col in df.columns:
                df = df.drop(columns=[col])
                print(" Removed features: Weight, Height, Reason for absence (data leakage risk)")

        self.analyze_data(df)

        print("\n" + "="*60)
        print("Data Preprocessing")
        print("="*60)

        numerical_cols = ['Age', 'Service time', 'Work load Average/day',
                          'Transportation expense', 'Distance from Residence to Work',
                          'Hit target', 'Body mass index']
        df = self.clip_outliers(df, numerical_cols)
        print(" Outliers clipped (1st-99th percentile)")

        print("\n" + "="*60)
        print("Data Augmentation")
        print("="*60)

        # Balance sparse target bins first, then add whole-set noisy copies.
        df = self.augmenter.smote_regression(df)
        df = self.augmenter.augment(df)

        print("\n" + "="*60)
        print("Feature Engineering")
        print("="*60)

        df = self.feature_engineering(df)

        y = df['Absenteeism time in hours'].values
        X_df = df.drop(columns=['Absenteeism time in hours'])

        # Categorical / binned columns to label-encode; encoders are kept
        # on the instance for reuse at predict time.
        ordinal_cols = ['Month of absence', 'Day of the week', 'Seasons',
                        'Disciplinary failure', 'Education', 'Social drinker',
                        'Social smoker', 'age_group', 'service_group',
                        'bmi_category', 'workload_category', 'commute_category',
                        'experience_level']

        for col in ordinal_cols:
            if col in X_df.columns:
                le = LabelEncoder()
                # astype(str) folds NaN bins into a 'nan' category.
                X_df[col] = le.fit_transform(X_df[col].astype(str))
                self.label_encoders[col] = le

        self.feature_names = list(X_df.columns)

        X = X_df.values.astype(float)

        X = self.scaler.fit_transform(X)

        X = self.select_features(X, y, k=20)

        print(f"\nFinal feature count: {X.shape[1]}")

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=42
        )

        return X_train, X_test, y_train, y_test
|
||||
|
||||
def train_random_forest(self, X_train, y_train):
|
||||
print("\n" + "="*60)
|
||||
print("Training Random Forest")
|
||||
print("="*60)
|
||||
|
||||
start_time = time.time()
|
||||
rf = RandomForestRegressor(random_state=42, n_jobs=-1)
|
||||
|
||||
param_distributions = {
|
||||
'n_estimators': [200, 300, 400],
|
||||
'max_depth': [10, 15, 20, 25],
|
||||
'min_samples_split': [2, 5, 10],
|
||||
'min_samples_leaf': [1, 2, 4],
|
||||
'max_features': ['sqrt', 0.7]
|
||||
}
|
||||
|
||||
print(f" Searching {20*5} parameter combinations...")
|
||||
random_search = RandomizedSearchCV(
|
||||
rf, param_distributions, n_iter=20, cv=5,
|
||||
scoring='r2', n_jobs=-1, random_state=42
|
||||
)
|
||||
random_search.fit(X_train, y_train)
|
||||
|
||||
self.models['random_forest'] = random_search.best_estimator_
|
||||
print_training_log("Random Forest", start_time, random_search.best_score_,
|
||||
random_search.best_params_, 20, 5)
|
||||
|
||||
return random_search.best_estimator_
|
||||
|
||||
def train_xgboost(self, X_train, y_train):
|
||||
print("\n" + "="*60)
|
||||
print("Training XGBoost")
|
||||
print("="*60)
|
||||
|
||||
start_time = time.time()
|
||||
xgb_model = xgb.XGBRegressor(random_state=42, n_jobs=-1)
|
||||
|
||||
param_distributions = {
|
||||
'n_estimators': [200, 300, 400],
|
||||
'max_depth': [5, 7, 9],
|
||||
'learning_rate': [0.05, 0.1],
|
||||
'subsample': [0.7, 0.8],
|
||||
'colsample_bytree': [0.7, 0.8],
|
||||
'min_child_weight': [1, 3],
|
||||
'reg_alpha': [0, 0.1],
|
||||
'reg_lambda': [1, 1.5]
|
||||
}
|
||||
|
||||
print(f" Searching {20*5} parameter combinations...")
|
||||
random_search = RandomizedSearchCV(
|
||||
xgb_model, param_distributions, n_iter=20, cv=5,
|
||||
scoring='r2', n_jobs=-1, random_state=42
|
||||
)
|
||||
random_search.fit(X_train, y_train)
|
||||
|
||||
self.models['xgboost'] = random_search.best_estimator_
|
||||
print_training_log("XGBoost", start_time, random_search.best_score_,
|
||||
random_search.best_params_, 20, 5)
|
||||
|
||||
return random_search.best_estimator_
|
||||
|
||||
def train_lightgbm(self, X_train, y_train):
|
||||
print("\n" + "="*60)
|
||||
print("Training LightGBM")
|
||||
print("="*60)
|
||||
|
||||
start_time = time.time()
|
||||
lgb_model = lgb.LGBMRegressor(random_state=42, n_jobs=-1, verbose=-1)
|
||||
|
||||
param_distributions = {
|
||||
'n_estimators': [200, 300, 400],
|
||||
'max_depth': [7, 9, 11, -1],
|
||||
'learning_rate': [0.05, 0.1],
|
||||
'subsample': [0.7, 0.8],
|
||||
'colsample_bytree': [0.7, 0.8],
|
||||
'min_child_samples': [5, 10, 20],
|
||||
'reg_alpha': [0, 0.1],
|
||||
'reg_lambda': [1, 1.5],
|
||||
'num_leaves': [31, 50, 70]
|
||||
}
|
||||
|
||||
print(f" Searching {20*5} parameter combinations...")
|
||||
random_search = RandomizedSearchCV(
|
||||
lgb_model, param_distributions, n_iter=20, cv=5,
|
||||
scoring='r2', n_jobs=-1, random_state=42
|
||||
)
|
||||
random_search.fit(X_train, y_train)
|
||||
|
||||
self.models['lightgbm'] = random_search.best_estimator_
|
||||
print_training_log("LightGBM", start_time, random_search.best_score_,
|
||||
random_search.best_params_, 20, 5)
|
||||
|
||||
return random_search.best_estimator_
|
||||
|
||||
def train_gradient_boosting(self, X_train, y_train):
|
||||
print("\n" + "="*60)
|
||||
print("Training Gradient Boosting")
|
||||
print("="*60)
|
||||
|
||||
start_time = time.time()
|
||||
gb = GradientBoostingRegressor(random_state=42)
|
||||
|
||||
param_distributions = {
|
||||
'n_estimators': [200, 300],
|
||||
'max_depth': [5, 7, 9],
|
||||
'learning_rate': [0.05, 0.1],
|
||||
'subsample': [0.7, 0.8],
|
||||
'min_samples_split': [2, 5],
|
||||
'min_samples_leaf': [1, 2]
|
||||
}
|
||||
|
||||
print(f" Searching {15*5} parameter combinations...")
|
||||
random_search = RandomizedSearchCV(
|
||||
gb, param_distributions, n_iter=15, cv=5,
|
||||
scoring='r2', n_jobs=-1, random_state=42
|
||||
)
|
||||
random_search.fit(X_train, y_train)
|
||||
|
||||
self.models['gradient_boosting'] = random_search.best_estimator_
|
||||
print_training_log("Gradient Boosting", start_time, random_search.best_score_,
|
||||
random_search.best_params_, 15, 5)
|
||||
|
||||
return random_search.best_estimator_
|
||||
|
||||
def train_extra_trees(self, X_train, y_train):
|
||||
print("\n" + "="*60)
|
||||
print("Training Extra Trees")
|
||||
print("="*60)
|
||||
|
||||
start_time = time.time()
|
||||
et = ExtraTreesRegressor(random_state=42, n_jobs=-1)
|
||||
|
||||
param_distributions = {
|
||||
'n_estimators': [200, 300, 400],
|
||||
'max_depth': [10, 15, 20],
|
||||
'min_samples_split': [2, 5, 10],
|
||||
'min_samples_leaf': [1, 2, 4],
|
||||
'max_features': ['sqrt', 0.7]
|
||||
}
|
||||
|
||||
print(f" Searching {20*5} parameter combinations...")
|
||||
random_search = RandomizedSearchCV(
|
||||
et, param_distributions, n_iter=20, cv=5,
|
||||
scoring='r2', n_jobs=-1, random_state=42
|
||||
)
|
||||
random_search.fit(X_train, y_train)
|
||||
|
||||
self.models['extra_trees'] = random_search.best_estimator_
|
||||
print_training_log("Extra Trees", start_time, random_search.best_score_,
|
||||
random_search.best_params_, 20, 5)
|
||||
|
||||
return random_search.best_estimator_
|
||||
|
||||
def train_stacking(self, X_train, y_train):
    """Fit a stacking ensemble on top of the already-trained base models.

    Uses whichever of RF / XGBoost / LightGBM / GradientBoosting models
    exist in ``self.models`` as base estimators with a Ridge meta-learner.
    Stores the ensemble in ``self.models['stacking']`` and returns it, or
    returns None when fewer than two base models are available.
    """
    print("\n" + "="*60)
    print("Training Stacking Ensemble")
    print("="*60)

    start_time = time.time()

    # Collect trained base learners in a fixed, deterministic order.
    candidates = [
        ('rf', 'random_forest'),
        ('xgb', 'xgboost'),
        ('lgb', 'lightgbm'),
        ('gb', 'gradient_boosting'),
    ]
    base_estimators = [
        (alias, self.models[key])
        for alias, key in candidates
        if key in self.models
    ]

    # Stacking needs at least two base models to be meaningful.
    if len(base_estimators) < 2:
        print(" Not enough base models for stacking")
        return None

    print(f" Base estimators: {[name for name, _ in base_estimators]}")
    print(f" Meta learner: Ridge")
    print(f" CV folds: 5")

    stacking = StackingRegressor(
        estimators=base_estimators,
        final_estimator=Ridge(alpha=1.0),
        cv=5,
        n_jobs=-1
    )
    stacking.fit(X_train, y_train)

    self.models['stacking'] = stacking
    elapsed = time.time() - start_time
    print(f" {'─'*50}")
    print(f" Stacking ensemble created in {elapsed:.1f}s")
    print(f" {'─'*50}")

    return stacking
def evaluate_model(self, model, X_test, y_test):
    """Score *model* on the held-out test set.

    Returns a dict with 'r2', 'mse', 'rmse' and 'mae', each rounded
    to four decimal places.
    """
    predictions = model.predict(X_test)

    mse = mean_squared_error(y_test, predictions)
    raw_metrics = {
        'r2': r2_score(y_test, predictions),
        'mse': mse,
        'rmse': np.sqrt(mse),
        'mae': mean_absolute_error(y_test, predictions),
    }
    return {name: round(value, 4) for name, value in raw_metrics.items()}
def save_models(self):
    """Persist every trained model plus the scaler and feature metadata.

    Writes ``<name>_model.pkl`` per model into ``config.MODELS_DIR``,
    then the scaler and the feature/encoder/metric artifacts.
    """
    os.makedirs(config.MODELS_DIR, exist_ok=True)

    for name, model in self.models.items():
        if model is None:
            continue  # skip models that failed to train (e.g. stacking)
        joblib.dump(model, os.path.join(config.MODELS_DIR, f'{name}_model.pkl'))
        print(f" {name} saved")

    joblib.dump(self.scaler, config.SCALER_PATH)
    # Auxiliary artifacts needed to reproduce preprocessing at predict time.
    artifacts = {
        'feature_names.pkl': self.feature_names,
        'selected_features.pkl': self.selected_features,
        'label_encoders.pkl': self.label_encoders,
        'model_metrics.pkl': self.model_metrics,
    }
    for filename, obj in artifacts.items():
        joblib.dump(obj, os.path.join(config.MODELS_DIR, filename))
    print(" Scaler and feature info saved")
def train_all(self):
    """Run the end-to-end training pipeline.

    Prepares the train/test split, tunes and trains every model
    (RF, Extra Trees, XGBoost, LightGBM, Gradient Boosting, Stacking),
    evaluates each on the test set, reports the best one, and saves all
    artifacts to disk.

    Returns:
        dict: per-model test metrics (``self.model_metrics``).
    """
    # Fix: removed unused `total_start` — total elapsed time is measured
    # by the caller (train_and_save_models), not here.
    print("\n" + "="*60)
    print("Optimized Model Training Started")
    print("="*60)
    print(f"Start time: {time.strftime('%Y-%m-%d %H:%M:%S')}")

    X_train, X_test, y_train, y_test = self.prepare_data()

    print(f"\nTrain size: {len(X_train)}, Test size: {len(X_test)}")

    print("\n" + "="*60)
    print("Training Models with Hyperparameter Optimization")
    print("="*60)

    # Base learners first; stacking last so it can reuse the trained models.
    self.train_random_forest(X_train, y_train)
    self.train_extra_trees(X_train, y_train)
    self.train_xgboost(X_train, y_train)
    self.train_lightgbm(X_train, y_train)
    self.train_gradient_boosting(X_train, y_train)
    self.train_stacking(X_train, y_train)

    print("\n" + "="*60)
    print("Evaluating Models on Test Set")
    print("="*60)

    best_r2 = -float('inf')
    best_model = None

    for name, model in self.models.items():
        if model is not None:
            metrics = self.evaluate_model(model, X_test, y_test)
            self.model_metrics[name] = metrics

            # Rough quality bands for the console report only.
            status = "Good" if metrics['r2'] > 0.5 else ("OK" if metrics['r2'] > 0.3 else "Poor")
            status_icon = "✓" if status == "Good" else ("△" if status == "OK" else "✗")
            print(f" {status_icon} {name:20s} - R2: {metrics['r2']:.4f}, RMSE: {metrics['rmse']:.4f}, MAE: {metrics['mae']:.4f}")

            if metrics['r2'] > best_r2:
                best_r2 = metrics['r2']
                best_model = name

    print(f"\n ★ Best Model: {best_model} (R2 = {best_r2:.4f})")

    print("\n" + "="*60)
    print("Saving Models")
    print("="*60)
    self.save_models()

    return self.model_metrics
def train_and_save_models():
    """Module entry point: train every model, then report timing and ranking.

    Runs the full ``OptimizedModelTrainer`` pipeline, prints total wall-clock
    time and a medal-decorated ranking of models by test R2, and returns the
    per-model metrics dict.
    """
    started = time.time()
    trainer = OptimizedModelTrainer()
    metrics = trainer.train_all()
    elapsed = time.time() - started

    banner = "=" * 60
    print("\n" + banner)
    print("Training Complete!")
    print(banner)
    print(f"Total training time: {elapsed:.1f}s ({elapsed/60:.1f} min)")
    print(f"End time: {time.strftime('%Y-%m-%d %H:%M:%S')}")

    rule = "-" * 60
    print("\n" + rule)
    print("Final Model Ranking (by R2)")
    print(rule)

    ranked = sorted(metrics.items(), key=lambda item: item[1]['r2'], reverse=True)
    medals = {1: "🥇", 2: "🥈", 3: "🥉"}
    for rank, (name, m) in enumerate(ranked, 1):
        medal = medals.get(rank, " ")
        print(f" {medal} {rank}. {name:20s} - R2: {m['r2']:.4f}, RMSE: {m['rmse']:.4f}")

    return metrics
if __name__ == '__main__':
    # Allow running this module directly to (re)train and persist all models.
    train_and_save_models()
||||
Reference in New Issue
Block a user