feat: 将数据集从国外员工缺勤数据替换为中国企业缺勤模拟数据
- 新增中国企业员工缺勤模拟数据集生成脚本(generate_dataset.py),覆盖7个行业、180家企业、2600名员工 - 重构 config.py,更新特征字段为中文名称,调整目标列、员工ID、行业类型等配置 - 重构 clustering.py,简化聚类逻辑,更新聚类特征和群体命名(高压通勤型、健康波动型等) - 重构 feature_mining.py,更新相关性分析和群体比较维度(按行业、班次、婚姻状态等) - 新增 model_features.py 定义模型训练特征 - 更新 preprocessing.py 和 train_model.py 适配新数据结构 - 更新各 API 路由默认参数(model: random_forest, dimension: industry) - 前端更新主题样式和各视图组件适配中文字段 - 更新系统名称为 China Enterprise Absence Analysis System
This commit is contained in:
@@ -1,9 +1,6 @@
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from sklearn.cluster import KMeans
|
||||
from sklearn.preprocessing import MinMaxScaler
|
||||
import joblib
|
||||
import os
|
||||
|
||||
import config
|
||||
from core.preprocessing import get_clean_data
|
||||
@@ -14,216 +11,123 @@ class KMeansAnalyzer:
|
||||
# Analyzer state, populated by fit(). The scaler is a MinMaxScaler so
# cluster centers can later be inverse-transformed back to the
# original feature units (see get_cluster_results).
self.n_clusters = n_clusters
self.model = None
self.scaler = MinMaxScaler()
# NOTE(review): data/data_scaled look unused after fit() switched to
# local variables — confirm before removing.
self.data = None
self.data_scaled = None
self.labels = None
|
||||
|
||||
def _get_feature_columns(self, df):
|
||||
df.columns = [col.strip() for col in df.columns]
|
||||
|
||||
feature_map = {
|
||||
'Age': None,
|
||||
'Service time': None,
|
||||
'Work load Average/day': None,
|
||||
'Body mass index': None,
|
||||
'Absenteeism time in hours': None
|
||||
}
|
||||
|
||||
for key in feature_map:
|
||||
if key in df.columns:
|
||||
feature_map[key] = key
|
||||
else:
|
||||
for col in df.columns:
|
||||
if key.replace(' ', '').lower() == col.replace(' ', '').lower():
|
||||
feature_map[key] = col
|
||||
break
|
||||
|
||||
actual_features = [v for v in feature_map.values() if v is not None]
|
||||
return actual_features
|
||||
|
||||
# Feature columns of the Chinese dataset schema used for clustering.
# Order matters: _generate_cluster_names unpacks cluster centers
# positionally as (age, tenure, overtime, commute, BMI, absence).
self.feature_cols = [
    '年龄',
    '司龄年数',
    '月均加班时长',
    '通勤时长分钟',
    'BMI',
    '缺勤时长(小时)',
]
|
||||
|
||||
def fit(self, n_clusters=None):
    """Fit a KMeans model on the cleaned employee dataset.

    Parameters
    ----------
    n_clusters : int, optional
        When given (and truthy), overrides the analyzer's current
        cluster count before fitting.

    Returns
    -------
    The fitted KMeans model (also stored on ``self.model``; labels are
    stored on ``self.labels``).
    """
    if n_clusters:
        self.n_clusters = n_clusters

    df = get_clean_data().reset_index(drop=True)

    # Scale every feature to [0, 1] so no single feature dominates the
    # Euclidean distances used by KMeans.
    data = df[self.feature_cols].values
    data_scaled = self.scaler.fit_transform(data)

    self.model = KMeans(
        n_clusters=self.n_clusters,
        random_state=config.RANDOM_STATE,
        n_init=10,
    )
    self.labels = self.model.fit_predict(data_scaled)
    return self.model
||||
|
||||
|
||||
def get_cluster_results(self, n_clusters=3):
    """Summarize each cluster: id, name, size, share, center, description.

    Refits the model lazily when it is missing or was fitted with a
    different cluster count.
    """
    if self.model is None or self.n_clusters != n_clusters:
        self.fit(n_clusters)

    # Map scaled centers back to original feature units for display.
    centers = self.scaler.inverse_transform(self.model.cluster_centers_)

    unique, counts = np.unique(self.labels, return_counts=True)
    total = len(self.labels)
    names = self._generate_cluster_names(centers)

    clusters = []
    for cluster_id, count in zip(unique, counts):
        center = centers[int(cluster_id)]
        clusters.append({
            'id': int(cluster_id),
            'name': names.get(int(cluster_id), f'群体{int(cluster_id) + 1}'),
            'member_count': int(count),
            'percentage': round(count / total * 100, 1),
            # Feature name -> unscaled center value, rounded for display.
            'center': {
                feature: round(float(value), 2)
                for feature, value in zip(self.feature_cols, center)
            },
            'description': self._generate_description(names.get(int(cluster_id), '')),
        })

    return {'n_clusters': self.n_clusters, 'clusters': clusters}
||||
|
||||
def get_cluster_profile(self, n_clusters=3):
    """Return radar-chart style profiles of the cluster centers.

    Values are the *scaled* ([0, 1]) center coordinates, so the
    dimensions are directly comparable; names are derived from the
    unscaled centers.
    """
    if self.model is None or self.n_clusters != n_clusters:
        self.fit(n_clusters)

    centers_scaled = self.model.cluster_centers_
    # Naming thresholds are defined in original units, so un-scale first.
    names = self._generate_cluster_names(self.scaler.inverse_transform(centers_scaled))

    return {
        # Short display labels, aligned index-for-index with feature_cols.
        'dimensions': ['年龄', '司龄', '加班', '通勤', 'BMI', '缺勤'],
        'dimension_keys': self.feature_cols,
        'clusters': [
            {
                'id': idx,
                'name': names.get(idx, f'群体{idx + 1}'),
                'values': [round(float(v), 2) for v in centers_scaled[idx]],
            }
            for idx in range(self.n_clusters)
        ],
    }
||||
|
||||
def get_scatter_data(self, n_clusters=3, x_axis='月均加班时长', y_axis='缺勤时长(小时)'):
    """Return per-employee scatter points colored by cluster.

    Unknown axis names fall back to the default x column and the
    configured target column respectively. At most 500 points are
    returned to keep the frontend payload small.
    """
    if self.model is None or self.n_clusters != n_clusters:
        self.fit(n_clusters)

    df = get_clean_data().reset_index(drop=True)

    # Defensive fallbacks for axis names not present in the dataset.
    if x_axis not in df.columns:
        x_axis = '月均加班时长'
    if y_axis not in df.columns:
        y_axis = config.TARGET_COLUMN

    # min() guards against a length mismatch between df and labels.
    points = []
    for idx in range(min(len(df), len(self.labels))):
        row = df.iloc[idx]
        points.append({
            'employee_id': str(row[config.EMPLOYEE_ID_COLUMN]),
            'x': float(row[x_axis]),
            'y': float(row[y_axis]),
            'cluster_id': int(self.labels[idx]),
        })

    return {
        'x_axis': x_axis,
        'x_axis_name': config.FEATURE_NAME_CN.get(x_axis, x_axis),
        'y_axis': y_axis,
        'y_axis_name': config.FEATURE_NAME_CN.get(y_axis, y_axis),
        'points': points[:500],
        # Fixed palette keyed by cluster id as a string (JSON-friendly).
        'cluster_colors': {
            '0': '#5B8FF9',
            '1': '#61DDAA',
            '2': '#F6BD16',
            '3': '#E8684A',
            '4': '#6DC8EC',
        },
    }
||||
|
||||
|
||||
def _generate_cluster_names(self, centers):
|
||||
names = {}
|
||||
|
||||
for i, center in enumerate(centers):
|
||||
if len(center) >= 5:
|
||||
service_time = center[1]
|
||||
work_load = center[2]
|
||||
bmi = center[3]
|
||||
absent = center[4]
|
||||
for idx, center in enumerate(centers):
|
||||
_, tenure, overtime, commute, bmi, absence = center
|
||||
if overtime > 38 and commute > 55 and absence > 8:
|
||||
names[idx] = '高压通勤型'
|
||||
elif bmi > 27 and absence > 8:
|
||||
names[idx] = '健康波动型'
|
||||
elif tenure > 8 and absence < 6:
|
||||
names[idx] = '稳定低风险型'
|
||||
elif overtime > 28 and absence > 7:
|
||||
names[idx] = '轮班负荷型'
|
||||
else:
|
||||
service_time = center[1] if len(center) > 1 else 0
|
||||
work_load = 0
|
||||
bmi = center[2] if len(center) > 2 else 0
|
||||
absent = center[3] if len(center) > 3 else 0
|
||||
|
||||
if service_time > 15 and absent < 3:
|
||||
names[i] = '模范型员工'
|
||||
elif work_load > 260 and absent > 5:
|
||||
names[i] = '压力型员工'
|
||||
elif bmi > 28:
|
||||
names[i] = '生活习惯型员工'
|
||||
else:
|
||||
names[i] = f'群体{i+1}'
|
||||
|
||||
names[idx] = f'群体{idx + 1}'
|
||||
return names
|
||||
|
||||
|
||||
def _generate_description(self, name):
|
||||
descriptions = {
|
||||
'模范型员工': '工龄长、工作稳定、缺勤率低',
|
||||
'压力型员工': '工作负荷大、缺勤较多',
|
||||
'生活习惯型员工': 'BMI偏高、需关注健康'
|
||||
'高压通勤型': '加班和通勤压力都高,缺勤时长偏长。',
|
||||
'健康波动型': '健康相关风险更高,需要重点关注。',
|
||||
'稳定低风险型': '司龄较长,缺勤水平稳定且偏低。',
|
||||
'轮班负荷型': '排班和工作负荷较重,缺勤风险较高。',
|
||||
}
|
||||
return descriptions.get(name, '常规员工群体')
|
||||
|
||||
def save_model(self):
|
||||
os.makedirs(config.MODELS_DIR, exist_ok=True)
|
||||
joblib.dump(self.model, config.KMEANS_MODEL_PATH)
|
||||
|
||||
def load_model(self):
|
||||
if os.path.exists(config.KMEANS_MODEL_PATH):
|
||||
self.model = joblib.load(config.KMEANS_MODEL_PATH)
|
||||
self.n_clusters = self.model.n_clusters
|
||||
return descriptions.get(name, '常规员工群体。')
|
||||
|
||||
|
||||
# Module-level singleton instance; presumably shared by the API route
# handlers (TODO confirm against callers).
kmeans_analyzer = KMeansAnalyzer()
|
||||
|
||||
Reference in New Issue
Block a user