
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
# Check whether a GPU is available and pick the device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Read the features and labels
data = pd.read_excel('feature_label.xlsx')

# Feature column names (kept in Chinese so they match the headers in feature_label.xlsx)
feature_names = ["躯体化", "强迫症状", "人际关系敏感", "抑郁", "焦虑", "敌对", "恐怖", "偏执", "精神病性", "其他",
                 "父亲教养方式数字化", "母亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化",
                 "出勤情况数字化", "学业情况数字化", "权重数字化值"]

# Split features from labels and normalize the features to [0, 1]
# NOTE: the scaler is re-fit on this data; ideally reuse the scaler fitted during training
X = data[feature_names].values
y = data['label'].values - 1  # map labels from 1-4 to 0-3
scaler = MinMaxScaler()
X = scaler.fit_transform(X)

# Define the MLP network
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(17, 32),   # input layer: 17 features
            nn.ReLU(),           # activation
            nn.Linear(32, 128),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(128, 32),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(32, 4),    # output layer: 4 classes
        )

    def forward(self, x):
        return self.model(x)

# Load the trained model weights (map_location keeps this working on CPU-only machines)
model = MLP().to(device)
model.load_state_dict(torch.load('Psychological_Classification_4Classes.pth', map_location=device))
model.eval()

# Create the data loader
dataset = TensorDataset(torch.from_numpy(X).float().to(device), torch.from_numpy(y).long().to(device))
loader = DataLoader(dataset, batch_size=32)

# Inference (no gradients needed for evaluation)
corrects = 0
sample_offset = 0
with torch.no_grad():
    for inputs, targets in loader:
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        corrects += torch.sum(preds == targets.data)
        # Print the inference result for every sample in the current batch,
        # numbering samples continuously across batches
        for i in range(len(inputs)):
            print(f'Sample {sample_offset + i + 1} | Target: {targets[i]} | Prediction: {preds[i]}')
        sample_offset += len(inputs)

# Compute the overall accuracy across the whole dataset
accuracy = corrects.double().cpu() / len(loader.dataset)
print(f'Overall Accuracy: {accuracy:.4f}')
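
# --- Optional extension, not part of the original script: per-class accuracy ---
# A minimal sketch under the assumption that labels 0-3 correspond to the original
# classes 1-4; it reuses `model`, `loader`, and `device` defined above and
# introduces no new data.
class_correct = torch.zeros(4)
class_total = torch.zeros(4)
with torch.no_grad():
    for inputs, targets in loader:
        preds = torch.argmax(model(inputs), dim=1)
        for c in range(4):
            mask = targets == c
            class_total[c] += mask.sum().cpu()
            class_correct[c] += (preds[mask] == c).sum().cpu()
for c in range(4):
    if class_total[c] > 0:
        print(f'Class {c + 1} accuracy: {class_correct[c] / class_total[c]:.4f}')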