@@ -5,8 +5,13 @@ import numpy as np
 import pandas as pd
 from sklearn.model_selection import StratifiedKFold
 from torch.utils.data import DataLoader, Dataset
+from torchsummary import summary
+# from torchviz import make_dot  # needs graphviz installed via apt, and the plots are not pretty; skip it for now
+import time
+import random
+
 
-# Define the MLP model
+# Define the MLP model; every layer has to be written by hand
 class MLP(nn.Module):
     def __init__(self, input_size, hidden_size, output_size):
         super(MLP, self).__init__()
@@ -22,8 +27,7 @@ class MLP(nn.Module):
         out = self.sigmoid(out)
         return out
 
-
-
+# Define the MMLP model: fully-connected hidden layers are generated automatically from the hidden_sizes list, with a ReLU activation appended after each layer
 class MMLP(nn.Module):
     def __init__(self, input_size, hidden_sizes, output_size):
         super(MMLP, self).__init__()
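The constructor comment above describes an auto-generated layer stack. As a quick illustration, here is a sketch of the stack that hidden_sizes = [32, 128, 32] would produce; the Linear-then-ReLU ordering per entry is assumed from the loop visible in the next hunk, and the final Linear deliberately gets no activation:

# Sketch (assumed construction order) of the stack MMLP builds from a hidden_sizes list.
import torch.nn as nn

layers = []
input_size, hidden_sizes, output_size = 10, [32, 128, 32], 2
for h in hidden_sizes:
    layers.append(nn.Linear(input_size, h))  # fully connected layer for this entry
    layers.append(nn.ReLU())                 # activation appended after each hidden layer
    input_size = h                           # next layer consumes this layer's width
layers.append(nn.Linear(input_size, output_size))  # output layer, no activation
print(nn.Sequential(*layers))
# -> Linear(10, 32), ReLU, Linear(32, 128), ReLU, Linear(128, 32), ReLU, Linear(32, 2)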
@@ -33,7 +37,8 @@ class MMLP(nn.Module):
             self.layers.append(nn.ReLU())
             input_size = h
         self.layers.append(nn.Linear(input_size, output_size))
-        self.layers.append(nn.Sigmoid())
+        # An activation function after the final layer severely hurts convergence; the cause is still to be analyzed
+        #self.layers.append(nn.Sigmoid())
         #self.layers.append(nn.Softmax(dim=1))
 
     def forward(self, x):
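The "cause is still to be analyzed" note has a plausible explanation worth recording: nn.CrossEntropyLoss applies log-softmax internally, so it expects raw logits. Feeding it sigmoid- or softmax-squashed outputs compresses the class scores into [0, 1] and weakens the gradient signal. A minimal sketch of the effect:

# Minimal sketch: CrossEntropyLoss on raw logits vs. on sigmoid-squashed outputs.
import torch
import torch.nn as nn

ce = nn.CrossEntropyLoss()
logits = torch.tensor([[4.0, -2.0]])   # raw network output, clearly favoring class 0
target = torch.tensor([0])
print(ce(logits, target).item())                 # small loss: the logit gap is large
print(ce(torch.sigmoid(logits), target).item())  # sigmoid squashes scores into [0, 1];
                                                 # the class gap nearly vanishes, so the loss stays high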
@@ -41,7 +46,6 @@ class MMLP(nn.Module):
             x = layer(x)
         return x
 
-
 # Define the dataset
 class TensorDataset(Dataset):
     def __init__(self, features, labels):
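The hunk cuts off after the constructor; for reference, a map-style Dataset also needs __len__ and __getitem__. A minimal sketch of the elided body, assuming the straightforward implementation:

# Sketch of the assumed TensorDataset body: __len__ reports the sample count,
# __getitem__ returns one (feature, label) pair for the DataLoader to batch.
from torch.utils.data import Dataset

class TensorDataset(Dataset):
    def __init__(self, features, labels):
        self.features = features
        self.labels = labels

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]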
@@ -61,6 +65,7 @@ def train_model(model, train_loader, criterion, optimizer, num_epochs):
     model.train()
 
     for epoch in range(num_epochs):
+        start_time = time.time()
         train_loss = 0.0
         train_corrects = 0
@@ -68,11 +73,34 @@ def train_model(model, train_loader, criterion, optimizer, num_epochs):
             inputs = inputs.to(device)
             labels = labels.to(device)
 
-            optimizer.zero_grad()
+            # # Something odd showed up; this verifies that my data is still correct right up to the point where training starts
+            # for i, input in enumerate(inputs):
+            #     ii = -1
+            #     for score in input:
+            #         if score >=3.0:
+            #             ii = 1
+            #             break
+            #     else:
+            #         ii = 0
+            #     assert(labels[i] == ii), f"{inputs} # {labels} # problem at item {i}"
+            # ## Something odd showed up; this verifies that my data is still correct right up to the point where training starts
+
+            optimizer.zero_grad()
             outputs = model(inputs)
             _, preds = torch.max(outputs, 1)
-            loss = criterion(outputs, labels.unsqueeze(1))
+            #print(outputs)
+            # print("$$$$$$$$$")
+            #print(preds)
+            #print(inputs)
+            #assert(torch.sum(preds)==0)
+            # print("####")
+            #print(labels)
+            # print("$$$$$$$$$")
+            #loss = criterion(outputs, labels.unsqueeze(1))
+            #loss = criterion(outputs, torch.tensor(labels, dtype=torch.long))
+            loss = criterion(outputs, labels.long())
+            #print(loss)
+            #print(inputs.size())
             loss.backward()
             optimizer.step()
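The switch from labels.unsqueeze(1) to labels.long() tracks the change of loss function: nn.CrossEntropyLoss wants a 1-D int64 tensor of class indices with shape [N] against [N, C] logits, while nn.BCELoss wants float targets shaped like its [N, 1] sigmoid outputs. A small sketch of both conventions, with assumed shapes:

# Sketch of the two target conventions the diff is switching between.
import torch
import torch.nn as nn

labels = torch.tensor([0., 1., 1., 0.])    # float labels, as produced by the data loading below

logits = torch.randn(4, 2)                 # [N, C] raw logits
ce = nn.CrossEntropyLoss()
loss_ce = ce(logits, labels.long())        # CrossEntropyLoss: 1-D int64 class indices, shape [N]

probs = torch.sigmoid(torch.randn(4, 1))   # [N, 1] probabilities
bce = nn.BCELoss()
loss_bce = bce(probs, labels.unsqueeze(1)) # BCELoss: float targets shaped like the outputs
print(loss_ce.item(), loss_bce.item())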
@@ -82,8 +110,8 @@ def train_model(model, train_loader, criterion, optimizer, num_epochs):
         train_loss = train_loss / len(train_loader.dataset)
         train_acc = train_corrects.double() / len(train_loader.dataset)
 
-        print('Epoch [{}/{}], Loss: {:.4f}, Acc: {:.4f}'
-              .format(epoch+1, num_epochs, train_loss, train_acc))
+        print('Epoch [{}/{}], Loss: {:.4f}, Acc: {:.4f}, took time: {:.2f}s'
+              .format(epoch+1, num_epochs, train_loss, train_acc, time.time() - start_time))
 
 # Define the test function
 def test(model, dataloader, criterion):
@@ -92,13 +120,16 @@ def test(model, dataloader, criterion):
     running_loss = 0.0
     running_corrects = 0
 
+    # Compared with training, the test code mainly differs in model.eval() and torch.no_grad(), which disable gradient computation during the test phase
+    # torch.no_grad() is a context manager: no operation in its block tracks gradients, saving memory and time; training needs gradients, so it cannot be used there. model.eval() disables BatchNorm and Dropout to avoid inconsistent results at inference, and also saves memory and time; using eval() in training mode can affect training correctness (not just memory and time)
     with torch.no_grad():
         for inputs, labels in dataloader:
             inputs = inputs.to(device)
             labels = labels.to(device)
 
             outputs = model(inputs)
-            loss = criterion(outputs, labels.unsqueeze(1))
+            #loss = criterion(outputs, labels.unsqueeze(1))
+            loss = criterion(outputs, labels.long())
 
             _, preds = torch.max(outputs, 1)
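The two comments added here summarize the standard evaluation pattern; as a compact reference, a sketch of that pattern with a hypothetical evaluate helper:

# Sketch of the eval()/no_grad() pattern the comments describe (evaluate is illustrative, not from the script).
import torch

def evaluate(model, dataloader, device):
    model.eval()                      # switch Dropout/BatchNorm to inference behavior
    correct, total = 0, 0
    with torch.no_grad():             # nothing inside this block tracks gradients
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            correct += (preds == labels.long()).sum().item()
            total += labels.size(0)
    model.train()                     # restore training behavior afterwards
    return correct / total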
@@ -111,117 +142,82 @@ def test(model, dataloader, criterion):
     print('Test Loss: {:.4f} Acc: {:.4f}'.format(test_loss, test_acc))
     return test_loss, test_acc
 
 
 # Load the data
 df = pd.read_excel("data/data_src.xlsx")
-src_features = df.iloc[:, 36:43].values.astype(np.float32)
+src_features = df.iloc[:, 34:44].values.astype(np.float32)
 src_labels = np.array([1 if str=="是" else 0 for str in df.iloc[:, -1].values]).astype(np.float32)
-print(src_labels)
-print(len(src_labels))
+print("Total number of samples:", src_features.shape[0])
+print("Feature dimension:", src_features.shape[1])
+print("Number of classes:", len(set(src_labels)))
+
+# Check whether a GPU device is available
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+print('Using device:', device)
 
 # Define the number of cross-validation folds
 n_splits = 5
-skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
+skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random.randint(0, 1000))
 
 # Define the model parameters
 input_size = src_features.shape[1]
-print(input_size)
-hidden_size = 32
-output_size = 1
-lr = 0.01
-num_epochs = 50
-batch_size = 32
+hidden_sizes = [32,128,32]
+output_size = len(set(src_labels))  # with CrossEntropyLoss() the number of classes is the output size; a sigmoid loss only fits binary classification with size 1, so CrossEntropyLoss is used throughout
+lr = 0.0001  # learning rate
+num_epochs = 1000
+batch_size = 128
 
-# Define the loss function and optimizer
-# criterion = nn.BCELoss()
-# optimizer = optim.Adam(model.parameters(), lr=lr)
-
-# Run cross-validation training and testing
-# for fold, (train_idx, val_idx) in enumerate(skf.split(features, labels)):
-#     print(f"Fold {fold+1}:")
-
-#     # Split the dataset into training and validation sets
-
-# Run cross-validation training and testing
-k_folds = 5
-#num_epochs = 50
-batch_size = 16
+# Define global result variables
 fold_accuracy=[]
+fold_loss=[]
+
+# 26111 samples in total
 
 # Iterate over each fold
 for fold, (train_idx, test_idx) in enumerate(skf.split(src_features, src_labels)):
     print(f"Fold [{fold+1}/{skf.n_splits}]")
     print("train_idx:", train_idx)
     print("test_idx:", test_idx)
 
+    # Slice the data
     train_features = src_features[train_idx]
     train_labels = src_labels[train_idx]
     test_features = src_features[test_idx]
     test_labels = src_labels[test_idx]
 
-    # Convert the numpy arrays to PyTorch tensors
+    # Convert the numpy arrays to PyTorch tensors; float is used throughout here, though with the CrossEntropy loss the labels could be long integers directly, saving the conversion later
     train_features_tensor = torch.tensor(train_features, dtype=torch.float)
     train_labels_tensor = torch.tensor(train_labels, dtype=torch.float)
     test_features_tensor = torch.tensor(test_features, dtype=torch.float)
     test_labels_tensor = torch.tensor(test_labels, dtype=torch.float)
 
-    # Build the dataset and data loaders
+    # Build the dataset and data loaders; batch_size is defined above
     train_dataset = TensorDataset(train_features_tensor, train_labels_tensor)
-    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
     test_dataset = TensorDataset(test_features_tensor, test_labels_tensor)
-    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
+    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
     print('--------------------------------')
 
     # Initialize the MLP model
-    #model = MLP(input_size, 32, 2)
-    model = MMLP(input_size, [32], 1)
+    #model = MLP(input_size, 32, 2)  # every layer written by hand; not used for now
+    model = MMLP(input_size, hidden_sizes, output_size)  # hidden_sizes is a list: multiple hidden layers
 
-    # Define the loss function and optimizer
-    #criterion = nn.CrossEntropyLoss()
-    criterion = nn.BCELoss()
-    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
+    # Inspect the model's network structure
+    # print(model)
+    summary(model.to(torch.device("cuda:0")), (input_size,))
+
+    # Define the loss function and optimizer; they must not be defined before the KFold loop, so that each fold's validation stays independent
+    criterion = nn.CrossEntropyLoss()
+    #criterion = nn.BCELoss()  # re-enable when using a sigmoid loss
+    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
 
     # Train the MLP model
     train_model(model, train_loader, criterion, optimizer, num_epochs)
-    model.train()
-    # for epoch in range(num_epochs):
-    #     for i, (inputs, labels) in enumerate(train_loader):
-    #         # Forward pass
-    #         #print("inputs size: ", inputs.size())
-    #         outputs = model(inputs)
-    #         loss = criterion(outputs, labels)
-
-    #         # Backward pass and optimization
-    #         optimizer.zero_grad()
-    #         loss.backward()
-    #         optimizer.step()
 
     # Test the MLP model
     test_loss, test_acc = test(model, test_loader, criterion)
-    # correct = 0
-    # total = 0
-    # model.eval()
-    # with torch.no_grad():
-    #     for inputs, labels in test_loader:
-    #         outputs = model(inputs)
-    #         _, predicted = torch.max(outputs.data, 1)
-    #         total += labels.size(0)
-    #         correct += (predicted == labels).sum().item()
 
     fold_accuracy.append(test_acc.item())
-    print(f'Accuracy for fold {fold}: {fold_accuracy[fold]*100} %')
+    fold_loss.append(test_loss)
+    print(f'Accuracy for fold {fold}: {fold_accuracy[fold]*100} %, loss: {fold_loss[fold]}')
     print('--------------------------------')
 
 print('K-FOLD CROSS VALIDATION RESULTS')
 print(f'Fold accuracies: {fold_accuracy}')
 print(f'Mean accuracy: {np.mean(fold_accuracy)}')
+print(f'Mean loss: {np.mean(fold_loss)}')
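One structural point from the new comments deserves a sketch: the model, criterion, and optimizer are created inside the fold loop so that no trained state leaks across folds, and StratifiedKFold preserves the class ratio in every split. A self-contained sketch on synthetic data:

# Sketch of the per-fold pattern the comments describe; the data here is synthetic for illustration.
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.random.rand(100, 10).astype(np.float32)
y = np.random.randint(0, 2, size=100)

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)  # a fixed seed keeps the folds reproducible
for fold, (train_idx, test_idx) in enumerate(skf.split(X, y)):
    # a fresh model, criterion, and optimizer would be created here, as the commit's comment requires
    print(f"Fold {fold}: {len(train_idx)} train / {len(test_idx)} test, "
          f"train positive rate: {y[train_idx].mean():.2f}, test positive rate: {y[test_idx].mean():.2f}")

Note that the commit's random_state=random.randint(0, 1000) makes every run draw different folds; keeping a fixed seed such as the earlier 42 is the choice that makes results reproducible.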