# Train a small MLP binary classifier on psychology survey data read from
# data/data_src.xlsx; saves the trained weights to psychology_model.pth.
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
# Define MLP model
class MLP(nn.Module):
    """Two-layer perceptron with a sigmoid output for binary classification."""

    def __init__(self, input_size, hidden_size, output_size):
        """Build the layers: linear -> ReLU -> linear -> sigmoid.

        Attribute names (fc1, relu1, fc2, sigmoid) are kept stable so that
        saved state_dict keys remain compatible.
        """
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map input features to a probability in (0, 1)."""
        hidden = self.relu1(self.fc1(x))
        return self.sigmoid(self.fc2(hidden))
# Define custom dataset
class PsychologyDataset(Dataset):
    """Dataset backed by an Excel sheet of psychology survey responses.

    Each item is a pair ``(features, label)`` where ``features`` is a
    float32 array taken from columns 36..42 (7 values) of the row, and
    ``label`` is a float32 scalar: 1.0 if the last column equals "是"
    ("yes"), else 0.0.
    """

    def __init__(self, data_file):
        """Load the whole spreadsheet into memory once.

        data_file: path to an Excel file readable by pandas.read_excel.
        """
        self.data = pd.read_excel(data_file)

    def __len__(self):
        """Number of rows (samples) in the sheet."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return (features, label) for row ``idx``."""
        # Feature block: columns 36..42 (7 values), cast for torch.
        features = self.data.iloc[idx, 36:43].values.astype(np.float32)
        # Last column holds the answer string; "是" marks the positive class.
        # (Original code bound this to a variable named `str`, shadowing the
        # builtin, and assigned a dead `label = -1` before the branch.)
        answer = self.data.iloc[idx, -1]
        label = np.float32(1.0 if answer == "是" else 0.0)
        return features, label
# Set hyperparameters
input_size = 7      # number of feature columns per sample
hidden_size = 16    # width of the hidden layer
output_size = 1     # single probability for binary classification
lr = 0.01           # Adam learning rate
num_epochs = 100

# Load data
dataset = PsychologyDataset("data/data_src.xlsx")
dataloader = DataLoader(dataset, batch_size=1, shuffle=False)

# Instantiate model, loss function, and optimizer
model = MLP(input_size, hidden_size, output_size)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=lr)

# Train model
for epoch in range(num_epochs):
    running_loss = 0.0
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        outputs = model(inputs)
        # BCELoss needs matching shapes: (batch, 1) outputs vs (batch,) labels.
        loss = criterion(outputs, labels.unsqueeze(1))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('Epoch [%d/%d], Loss: %.4f' % (epoch+1, num_epochs, running_loss / len(dataloader)))

# Save trained model
torch.save(model.state_dict(), 'psychology_model.pth')
|