You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

158 lines
5.9 KiB
Python

This file contains ambiguous Unicode characters!

This file contains ambiguous Unicode characters that may be confused with others in your current locale. If your use case is intentional and legitimate, you can safely ignore this warning. Use the Escape button to highlight these characters.

"""
文件名: detect_num.py
训练部分代码
作者: 王春林
创建日期: 2023年7月13日
最后修改日期: 2023年7月18日
版本号: 1.0.0
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import StratifiedKFold
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from torchsummary import summary
# 检查GPU是否可用
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# 读取特征和标签
data = pd.read_excel('feature_label.xlsx')
# 以下是你的特征名
feature_names = ["躯体化", "强迫症状", "人际关系敏感", "抑郁", "焦虑", "敌对", "恐怖", "偏执", "精神病性", "其他", "父亲教养方式数字化", "母亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化", "出勤情况数字化", "学业情况数字化", "权重数字化值"]
# 将特征和标签分开,并做归一化处理
X = data[feature_names].values
y = (data['label'].values - 1) / 3 # 将标签从1-4转换为0-1
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
# 定义 MLP 网络
class MLP(nn.Module):
    """Fully connected regression network.

    Maps ``in_features`` input features to a single scalar (the
    label normalized to [0, 1]).  ``in_features`` defaults to 17 to
    match the feature list above but may be overridden.
    """

    def __init__(self, in_features: int = 17):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(in_features, 32),   # input layer
            nn.ReLU(),                    # activation
            nn.Linear(32, 128),           # hidden layer
            nn.ReLU(),                    # activation
            nn.Linear(128, 32),           # hidden layer
            nn.ReLU(),                    # activation
            nn.Linear(32, 1),             # single regression output
        )

    def forward(self, x):
        # Squeeze only the trailing feature dimension: a bare .squeeze()
        # would also collapse a batch of size 1 into a 0-d tensor and
        # break the loss/accuracy computations downstream.
        return self.model(x).squeeze(-1)
# Plain KFold rather than StratifiedKFold: the continuous regression
# targets do not stratify directly.
kfold = KFold(n_splits=5, shuffle=True)

# Per-fold learning curves, accumulated for the averaged plots below.
all_train_losses, all_val_losses, all_train_accs, all_val_accs = [], [], [], []

# Decision thresholds mapping a regression output in [0, 1] back to one
# of the four original classes (0..3): class = #thresholds exceeded.
# Hoisted out of the batch loops (it was rebuilt per batch).
thresholds = torch.tensor([1 / 6, 1 / 2, 5 / 6]).to(device)


def _count_class_hits(outputs, targets):
    """Return the number of batch elements whose thresholded class
    prediction matches the integer class recovered from the 0-1 target."""
    # Flatten first (also normalizes any 0-d output), then count, for
    # every element, how many thresholds it strictly exceeds — a
    # vectorized version of the original per-element Python loop.
    preds = (torch.flatten(outputs).unsqueeze(1) > thresholds).sum(dim=1)
    return torch.sum(preds == (targets.data * 3).long())


for fold, (train_index, test_index) in enumerate(kfold.split(X, y)):
    X_train, X_val = X[train_index], X[test_index]
    y_train, y_val = y[train_index], y[test_index]

    train_dataset = TensorDataset(
        torch.from_numpy(X_train).float().to(device),
        torch.from_numpy(y_train).float().to(device),
    )
    val_dataset = TensorDataset(
        torch.from_numpy(X_val).float().to(device),
        torch.from_numpy(y_val).float().to(device),
    )
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=32)

    model = MLP().to(device)
    # torchsummary requires a CUDA-resident model; only print the summary
    # when a GPU exists (the original unconditionally moved the model to
    # cuda:0 and crashed on CPU-only machines).
    if torch.cuda.is_available():
        summary(model, (1, 17))

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters())
    n_epochs = 120

    train_losses, val_losses, train_accs, val_accs = [], [], [], []
    best_val_acc = 0.0   # best validation accuracy seen in this fold
    best_model = None    # cloned state_dict of the best model in this fold

    for epoch in range(n_epochs):
        # ---- training pass ----
        model.train()
        running_loss, corrects = 0, 0
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * inputs.size(0)
            corrects += _count_class_hits(outputs, targets)
        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_acc = corrects.double().cpu() / len(train_loader.dataset)
        train_losses.append(epoch_loss)
        train_accs.append(epoch_acc)
        print(f'Fold {fold+1}, Epoch {epoch+1} | Train Loss: {epoch_loss:.4f} | Train Accuracy: {epoch_acc:.4f}')

        # ---- validation pass ----
        model.eval()
        running_loss, corrects = 0, 0
        with torch.no_grad():
            for inputs, targets in val_loader:
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                running_loss += loss.item() * inputs.size(0)
                corrects += _count_class_hits(outputs, targets)
        epoch_loss = running_loss / len(val_loader.dataset)
        epoch_acc = corrects.double().cpu() / len(val_loader.dataset)
        val_losses.append(epoch_loss)
        val_accs.append(epoch_acc)
        print(f'Fold {fold+1}, Epoch {epoch+1} | Validation Loss: {epoch_loss:.4f} | Validation Accuracy: {epoch_acc:.4f}')

        # Keep a snapshot of the best-performing weights.  The tensors
        # must be cloned: state_dict() returns live references that the
        # optimizer keeps mutating, so the original code actually saved
        # the *final* weights, not the best ones.
        if epoch_acc > best_val_acc:
            best_val_acc = epoch_acc
            best_model = {k: v.detach().clone() for k, v in model.state_dict().items()}

    # Persist this fold's best checkpoint.
    torch.save(best_model, f'model_fold{fold+1}.pth')
    all_train_losses.append(train_losses)
    all_val_losses.append(val_losses)
    all_train_accs.append(train_accs)
    all_val_accs.append(val_accs)
# Average the learning curves across folds and plot loss and accuracy
# side by side; finish with a one-line summary of the final-epoch means.
mean_train_loss = np.mean(all_train_losses, axis=0)
mean_val_loss = np.mean(all_val_losses, axis=0)
mean_train_acc = np.mean(all_train_accs, axis=0)
mean_val_acc = np.mean(all_val_accs, axis=0)
epochs = range(n_epochs)

plt.figure(figsize=(12, 4))

ax_loss = plt.subplot(1, 2, 1)
ax_loss.plot(epochs, mean_train_loss, label='Train Loss')
ax_loss.plot(epochs, mean_val_loss, label='Validation Loss')
ax_loss.legend()
ax_loss.set_title('Loss')

ax_acc = plt.subplot(1, 2, 2)
ax_acc.plot(epochs, mean_train_acc, label='Train Accuracy')
ax_acc.plot(epochs, mean_val_acc, label='Validation Accuracy')
ax_acc.legend()
ax_acc.set_title('Accuracy')

print(f'All Fold Average | Train Loss: {mean_train_loss[-1].item():.4f} | Train Accuracy: {mean_train_acc[-1].item():.4f} | Validation Loss: {mean_val_loss[-1].item():.4f} | Validation Accuracy: {mean_val_acc[-1].item():.4f}')
plt.show()