Full training pipeline: feature_label.xlsx, Feature_Weighting_Unnormalize.py, Data_Partition.py, Psy_Train.py; validation: Psy_Detect.py; inference: detect_server_api.py, inference.py; training environment: requirements-psy.txt; validation/inference environment: requirements-psy_cpu.txt

main
wangchunlin 2 years ago
parent d32c84c501
commit c16c135dc4
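For orientation: per the commit message above, the offline order is feature weighting, then data partition, then training, with Psy_Detect.py run afterwards for validation. A minimal orchestration sketch (an assumption about usage, not part of the commit; it presumes the scripts and feature_label.xlsx sit in the working directory):

import subprocess

# Run the offline stages in dependency order: weight the raw features,
# split the weighted data into folds, then train the MLP.
for script in ("Feature_Weighting_Unnormalize.py", "Data_Partition.py", "Psy_Train.py"):
    subprocess.run(["python", script], check=True)
# Evaluate the trained model on the held-out fold.
subprocess.run(["python", "Psy_Detect.py"], check=True)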

@@ -0,0 +1,51 @@ Data_Partition.py
"""
文件名: Data_Partition.py
将所有样本数据划分为4+14份训练1份验证在原有5倍交叉验证的基础上改动取消5倍交叉验证机制
作者: 王春林
创建日期: 2024年3月18日
最后修改日期: 2023年3月31日
版本号: 1.0.0
"""
import pandas as pd
from sklearn.model_selection import StratifiedKFold
# Read features and labels
data = pd.read_excel('feature_label_weighted.xlsx')
# Feature (column) names
feature_names = ["强迫症状数字化", "人际关系敏感数字化", "抑郁数字化", "多因子症状", "母亲教养方式数字化", "父亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化", "学业情况数字化", "出勤情况数字化"]
# Separate features and labels
X = data[feature_names].values
y = data['类别'].values  # already shifted by -1 in the weighting stage; no further adjustment
# Partition the samples with a stratified 5-way split
skf = StratifiedKFold(n_splits=5, shuffle=True)
for fold, (train_index, test_index) in enumerate(skf.split(X, y)):
    X_train, X_val = X[train_index], X[test_index]
    y_train, y_val = y[train_index], y[test_index]
    ###### Save to Excel ########
    # Build DataFrames and write them to Excel files
    train_data = pd.DataFrame(X_train, columns=feature_names)
    train_data['类别'] = y_train
    train_data.to_excel(f'train_fold{fold}.xlsx', index=False)
    val_data = pd.DataFrame(X_val, columns=feature_names)
    val_data['类别'] = y_val
    val_data.to_excel(f'val_fold{fold}.xlsx', index=False)
    ###### Save to Excel ########
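Since StratifiedKFold preserves class proportions, each train/val pair written above should show nearly identical label distributions. A quick sanity check one could run afterwards (a sketch, assuming fold 0 was written):

import pandas as pd

# Class proportions should match closely between the paired files.
for name in ("train_fold0.xlsx", "val_fold0.xlsx"):
    df = pd.read_excel(name)
    print(name, df['类别'].value_counts(normalize=True).sort_index().round(3).to_dict())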

@@ -0,0 +1,55 @@ Feature_Weighting_Unnormalize.py
"""
文件名: Data_Partition.py
将所有样本数据划分为4+14份训练1份验证在原有5倍交叉验证的基础上改动取消5倍交叉验证机制
作者: 王春林
创建日期: 2024年3月18日
最后修改日期: 2023年3月31日
版本号: 1.0.0
"""
import pandas as pd
# Read features and labels
data = pd.read_excel('feature_label.xlsx')
# Feature (column) names
feature_names = ["强迫症状数字化", "人际关系敏感数字化", "抑郁数字化", "多因子症状", "母亲教养方式数字化", "父亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化", "学业情况数字化", "出勤情况数字化"]
# Per-feature weights
feature_weights = [0.135, 0.085, 0.08, 0.2, 0.09, 0.09, 0.06, 0.06, 0.08, 0.12]
# Scale the weights so the largest becomes 1
max_value = max(feature_weights)
feature_weights_scaled = [x / max_value for x in feature_weights]
# Print the scaled feature weights
print("Scaled Feature Weights:", feature_weights_scaled)
# Separate features and labels
X = data[feature_names].values
y = data['label'].values - 1  # shift the labels from 1-4 to 0-3
# Multiply each feature by its weight (applied after normalization)
for i in range(len(feature_names)):
    X[:, i] = X[:, i] * feature_weights_scaled[i]
feature_label_weighted = pd.DataFrame(X, columns=feature_names)
feature_label_weighted['类别'] = y
feature_label_weighted['学号'] = data['编号']
feature_label_weighted.to_excel('feature_label_weighted.xlsx', index=False)
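Scaling divides every weight by the maximum (0.2), so 0.135 becomes 0.675 and the '多因子症状' weight becomes exactly 1.0. A worked check of the arithmetic:

weights = [0.135, 0.085, 0.08, 0.2, 0.09, 0.09, 0.06, 0.06, 0.08, 0.12]
scaled = [w / max(weights) for w in weights]
print([round(s, 3) for s in scaled])  # [0.675, 0.425, 0.4, 1.0, 0.45, 0.45, 0.3, 0.3, 0.4, 0.6]
assert max(scaled) == 1.0  # the dominant feature keeps full magnitude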

@@ -0,0 +1,100 @@ Psy_Detect.py
"""
文件名: train_gpu_blance_10features.py
训练部分代码
作者: 王春林
创建日期: 2023年10月18日
最后修改日期: 2023年10月20日
版本号: 1.0.0
"""
import pandas as pd
import numpy as np
import torch
from torch import nn
from sklearn.metrics import precision_score, recall_score, f1_score
# Validation-set Excel file (the validation set doubles as the test set)
val_excel = r'val_fold0.xlsx'
# Check whether a GPU is available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
# Define the MLP network
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(10, 32),   # input layer
            nn.ReLU(),           # activation
            nn.Linear(32, 128),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(128, 32),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(32, 4),    # output layer: 4 classes
        )

    def forward(self, x):
        return self.model(x)
# Read features and labels
val_data = pd.read_excel(val_excel)
# Feature (column) names
feature_names = ["强迫症状数字化", "人际关系敏感数字化", "抑郁数字化", "多因子症状", "母亲教养方式数字化", "父亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化", "学业情况数字化", "出勤情况数字化"]
# Separate features and labels (weighting was already applied upstream)
X_val = val_data[feature_names].values
y_val = val_data['类别'].values
X_val_tensor = torch.from_numpy(X_val).float().to(device)
# Load the trained model parameters
model = MLP().to(device)
model.load_state_dict(torch.load('train_fold0.xlsx.pth', map_location=device))
model.eval()
# Run inference
with torch.no_grad():
    outputs = model(X_val_tensor)
# Arg-max over the logits gives the predicted class
_, predictions = torch.max(outputs, 1)
# print("Predictions:", predictions.cpu().numpy())
# Print the first 100 predictions and ground-truth labels
print("First 100 predictions:", predictions.cpu().numpy()[:100])
print("First 100 ground-truth labels:", y_val[:100])
# Indices of misclassified samples
wrong_indices = np.where(y_val != predictions.cpu().numpy())[0]
print("Misclassified sample indices:", wrong_indices)
# Count the misclassifications
wrong_count = len(wrong_indices)
total_count = len(y_val)
wrong_percentage = (wrong_count / total_count) * 100
print("Misclassified count:", wrong_count)
print("Misclassified percentage:", wrong_percentage, "%")
print("Total count:", total_count)
# Per-class precision, recall, and F1 score
precision = precision_score(y_val, predictions.cpu().numpy(), average=None)
recall = recall_score(y_val, predictions.cpu().numpy(), average=None)
f1 = f1_score(y_val, predictions.cpu().numpy(), average=None)
# Mean precision, recall, and F1 across classes
avg_precision = np.mean(precision)
avg_recall = np.mean(recall)
avg_f1 = np.mean(f1)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1)
print("Mean precision:", avg_precision)
print("Mean recall:", avg_recall)
print("Mean F1 score:", avg_f1)

@@ -0,0 +1,211 @@ Psy_Train.py
"""
文件名: train_gpu_blance_10features.py
训练部分代码
作者: 王春林
创建日期: 2023年10月18日
最后修改日期: 2023年10月20日
版本号: 1.0.0
"""
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.utils.class_weight import compute_class_weight  # used by the commented-out alternative below
# Training-set Excel file
train_excel = r'train_fold0.xlsx'
# Validation-set Excel file
val_excel = r'val_fold0.xlsx'
# Use the GPU when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the MLP network
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(10, 32),   # input layer
            nn.ReLU(),           # activation
            nn.Linear(32, 128),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(128, 32),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(32, 4),    # output layer: 4 classes
        )

    def forward(self, x):
        return self.model(x)
# Read features and labels
train_data = pd.read_excel(train_excel)
val_data = pd.read_excel(val_excel)
# Feature (column) names
feature_names = ["强迫症状数字化", "人际关系敏感数字化", "抑郁数字化", "多因子症状", "母亲教养方式数字化", "父亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化", "学业情况数字化", "出勤情况数字化"]
# Separate features and labels (weighting was already applied upstream)
X_train = train_data[feature_names].values
y_train = train_data['类别'].values
X_val = val_data[feature_names].values
y_val = val_data['类别'].values
train_dataset = TensorDataset(torch.from_numpy(X_train).float().to(device), torch.from_numpy(y_train).long().to(device))
val_dataset = TensorDataset(torch.from_numpy(X_val).float().to(device), torch.from_numpy(y_val).long().to(device))
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=16)
model = MLP().to(device)
# criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.Adam(model.parameters())
# optimizer = torch.optim.Adam(model.parameters(), lr=0.0005, weight_decay=1e-4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
n_epochs = 150  # increased to 150 epochs
train_losses, val_losses, train_accs, val_accs, val_class_precisions, val_class_recalls, val_class_f1_scores = [], [], [], [], [], [], []
# Class-balancing mechanism: weight each class by its inverse frequency
class_sample_counts = np.bincount(y_train)
class_weights = 1.0 / torch.tensor(class_sample_counts, dtype=torch.float32)
class_weights = class_weights.to(device)
print(class_sample_counts)
print(class_weights)
# Alternative: sklearn's balanced class weights
# class_weights = compute_class_weight('balanced', classes=np.unique(y_train), y=y_train)
# class_weights = torch.tensor(class_weights, dtype=torch.float32).to(device)
# print("class weights: ", class_weights)
# additional_weights = torch.tensor([1.0, 1.0, 1.0, 1.0], dtype=torch.float32).to(device)
# class_weights *= additional_weights
# print("Updated class weights: ", class_weights)
# Weighted cross-entropy loss
criterion = nn.CrossEntropyLoss(weight=class_weights)
# criterion = nn.CrossEntropyLoss()
# Track the best model by mean validation F1
best_val_acc = 0.0
best_model = None
for epoch in range(n_epochs):
    model.train()
    running_loss, corrects = 0, 0
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        corrects += torch.sum(preds == targets.data)
    epoch_loss = running_loss / len(train_loader.dataset)
    epoch_acc = corrects.double().cpu() / len(train_loader.dataset)
    train_losses.append(epoch_loss)
    train_accs.append(epoch_acc)
    print(f'Epoch {epoch+1} | Train Loss: {epoch_loss:.4f} | Train Accuracy: {epoch_acc:.4f}')

    model.eval()
    all_preds, all_targets = [], []
    running_loss, corrects = 0, 0
    with torch.no_grad():
        for inputs, targets in val_loader:
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            running_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            corrects += torch.sum(preds == targets.data)
            all_preds.extend(preds.cpu().numpy())
            all_targets.extend(targets.cpu().numpy())
    class_precisions = precision_score(all_targets, all_preds, average=None)
    class_recalls = recall_score(all_targets, all_preds, average=None)
    class_f1_scores = f1_score(all_targets, all_preds, average=None)
    for i, (precision, recall, f1) in enumerate(zip(class_precisions, class_recalls, class_f1_scores)):
        print(f'Epoch {epoch+1} | Class {i+1} Metrics: Precision={precision:.4f}, Recall={recall:.4f}, F1 Score={f1:.4f}')
    epoch_loss = running_loss / len(val_loader.dataset)
    epoch_acc = corrects.double().cpu() / len(val_loader.dataset)
    val_losses.append(epoch_loss)
    val_accs.append(epoch_acc)
    val_class_precisions.append(np.mean(class_precisions))
    val_class_recalls.append(np.mean(class_recalls))
    val_class_f1_scores.append(np.mean(class_f1_scores))
    print(f'Epoch {epoch+1} | Validation Loss: {epoch_loss:.4f} | Validation Accuracy: {epoch_acc:.4f}')
    # Keep the best model, selected by mean F1 across classes
    if np.mean(class_f1_scores) > best_val_acc:
        best_val_acc = np.mean(class_f1_scores)
        best_model = model.state_dict()
# Save the best model (filename derived from the training Excel file)
torch.save(best_model, train_excel + '.pth')
# Aggregate across "folds" (kept from the cross-validated version; only one run now)
all_train_losses, all_val_losses, all_train_accs, all_val_accs, all_class_precisions, all_class_f1_scores, all_class_recalls = [], [], [], [], [], [], []
all_train_losses.append(train_losses)
all_val_losses.append(val_losses)
all_train_accs.append(train_accs)
all_val_accs.append(val_accs)
all_class_precisions.append(val_class_precisions)
all_class_recalls.append(val_class_recalls)
all_class_f1_scores.append(val_class_f1_scores)
print(f'All Fold Average | Train Loss: {np.mean(all_train_losses, axis=0)[-1].item():.4f} | Train Accuracy: {np.mean(all_train_accs, axis=0)[-1].item():.4f} | Validation Loss: {np.mean(all_val_losses, axis=0)[-1].item():.4f} | Validation Accuracy: {np.mean(all_val_accs, axis=0)[-1].item():.4f} | Validation Precision: {np.mean(all_class_precisions, axis=0)[-1].item():.4f} | Validation Recall: {np.mean(all_class_recalls, axis=0)[-1].item():.4f} | Validation F1_score: {np.mean(all_class_f1_scores, axis=0)[-1].item():.4f}')
# all_train_losses=train_losses
# all_val_losses=val_losses
# all_train_accs=train_accs
# all_val_accs=val_accs
# all_class_precisions=val_class_precisions
# all_class_recalls=val_class_recalls
# all_class_f1_scores=val_class_f1_scores
# print(f'All Fold Average | Train Loss: {all_train_losses:.4f} | Train Accuracy: {all_train_accs:.4f} | Validation Loss: {all_val_losses:.4f} | Validation Accuracy: {all_val_accs:.4f} | Validation Precision: {all_class_precisions:.4f} | Validation Recall: {all_class_recalls:.4f} | Validation F1_score: {all_class_f1_scores:.4f}')
# Plot the average loss and accuracy curves
plt.figure(figsize=(12, 4))
plt.subplot(3, 2, 1)
plt.plot(range(n_epochs), np.mean(all_train_losses, axis=0), label='Train Loss')
plt.plot(range(n_epochs), np.mean(all_val_losses, axis=0), label='Validation Loss')
plt.legend()
plt.title('Loss')
plt.subplot(3, 2, 2)
plt.plot(range(n_epochs), np.mean(all_train_accs, axis=0), label='Train Accuracy')
plt.plot(range(n_epochs), np.mean(all_val_accs, axis=0), label='Validation Accuracy')
plt.legend()
plt.title('Accuracy')
plt.subplot(3, 2, 3)
plt.plot(range(n_epochs), np.mean(all_class_precisions, axis=0), label='Validation Precision')
plt.legend()
plt.title('Precision')
plt.subplot(3, 2, 4)
plt.plot(range(n_epochs), np.mean(all_class_recalls, axis=0), label='Validation Recall')
plt.legend()
plt.title('Recall')
plt.subplot(3, 2, 5)
plt.plot(range(n_epochs), np.mean(all_class_f1_scores, axis=0), label='Validation F1_score')
plt.legend()
plt.title('F1_score')
plt.show()
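Class imbalance is handled above through inverse-frequency weights in the loss. An alternative, not used in this commit, is to rebalance at the sampling level with torch.utils.data.WeightedRandomSampler; a hedged sketch, assuming y_train and train_dataset as defined above:

import numpy as np
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

# Weight each sample by the inverse frequency of its class.
counts = np.bincount(y_train)
sample_weights = torch.tensor([1.0 / counts[c] for c in y_train], dtype=torch.double)
sampler = WeightedRandomSampler(sample_weights, num_samples=len(sample_weights), replacement=True)
# A sampler replaces shuffle=True; the loss could then stay unweighted.
balanced_loader = DataLoader(train_dataset, batch_size=16, sampler=sampler)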

@@ -0,0 +1,161 @@ detect_server_api.py
import os
import sys
root_path = os.getcwd()
sys.path.append(root_path)
import time
import signal
import uvicorn
import pandas as pd
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List
from inference import predict_with_model
from fastapi.middleware.cors import CORSMiddleware
import threading
from http.server import SimpleHTTPRequestHandler, HTTPServer

app = FastAPI()
# Allow cross-origin requests from any domain
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
    allow_headers=["*"],
)

# Request handler for the static front-end server
class MyRequestHandler(SimpleHTTPRequestHandler):
    def __init__(self, *args, **kwargs):
        # Serve from the current directory; the directory must be passed to
        # super().__init__, which handles the request immediately.
        super().__init__(*args, directory='.', **kwargs)
# Response model
class PredictionResult(BaseModel):
    predictions: list

# Request body model
class Features(BaseModel):
    # Ten SCL factor scores (further processing happens downstream)
    somatization: float
    obsessive_compulsive: float
    interpersonal_sensitivity: float
    depression: float
    anxiety: float
    hostility: float
    terror: float
    paranoia: float
    psychoticism: float
    other: float
    # Basic-information features
    father_parenting_style: int  # warmth and understanding: 1, other: 0
    mother_parenting_style: int  # warmth and understanding: 1, other: 0
    self_assessed_family_economic_condition: int  # poverty: 2, poor: 1, other: 0
    history_of_psychological_counseling: bool  # yes: 1, no: 0
    # Daily-behavior features
    absenteeism_above_average: bool  # above the average count: 1, at or below: 0
    academic_warning: bool  # warning: 1, none: 0
# API endpoint
@app.post("/classify/")
async def classify_features(features_list: List[Features]):
    # Accumulate one engineered-feature row per request item
    all_features = pd.DataFrame()
    for features in features_list:
        relevant_features = {
            "somatization": features.somatization,
            "obsessive_compulsive": features.obsessive_compulsive,
            "interpersonal_sensitivity": features.interpersonal_sensitivity,
            "depression": features.depression,
            "anxiety": features.anxiety,
            "hostility": features.hostility,
            "terror": features.terror,
            "paranoia": features.paranoia,
            "psychoticism": features.psychoticism,
            "other": features.other
        }
        # Build a single-row DataFrame of the digitized features
        df_feature = pd.DataFrame({
            # Digitized features: basic information
            '父亲教养方式数字化': [0.59 if features.father_parenting_style == 1 else 0.46],
            '母亲教养方式数字化': [0.69 if features.mother_parenting_style == 1 else 0.56],
            '自评家庭经济条件数字化': [0.54 if features.self_assessed_family_economic_condition in [2, 1] else 0.47],
            '有无心理治疗(咨询)史数字化': [0.21 if features.history_of_psychological_counseling else 0.09],
            # Digitized features: symptom factors
            '强迫症状数字化': [features.obsessive_compulsive / 4],
            '人际关系敏感数字化': [features.interpersonal_sensitivity / 4],
            '抑郁数字化': [features.depression / 4],
            '多因子症状': [sum(1 for value in relevant_features.values() if value > 3.0) / 10],
            # Digitized features: daily behavior
            '出勤情况数字化': [0.74 if features.absenteeism_above_average else 0.67],
            '学业情况数字化': [0.59 if features.academic_warning else 0.50]
        })
        all_features = pd.concat([all_features, df_feature], ignore_index=True)
    # # Convert the DataFrame to a dict and return it directly (debugging)
    # df_dict = df_feature.to_dict(orient='records')
    # print(df_dict)
    # return df_dict
    # print(all_features)
    start_time = time.time()  # record the start time
    predictions = predict_with_model(all_features)
    end_time = time.time()  # record the end time
    print("Prediction time:", end_time - start_time, "seconds")
    print("Predictions:", predictions)
    # Return the predictions
    return PredictionResult(predictions=predictions)
# Signal handler
def signal_handler(sig, frame):
    print("Ctrl+C detected, shutting down the server...")
    # Server shutdown work goes here
    sys.exit(0)

# Start the static front-end HTTP server
def run_server():
    port = 8080
    server = HTTPServer(('0.0.0.0', port), MyRequestHandler)
    print(f'Web HTTP Server listening on http://0.0.0.0:{port}')
    server.serve_forever()

if __name__ == "__main__":
    # Register the signal handler
    signal.signal(signal.SIGINT, signal_handler)
    # Serve the front end on a separate thread
    server_thread = threading.Thread(target=run_server)
    server_thread.start()
    name_app = os.path.basename(__file__)[0:-3]  # script name without ".py"
    log_config = {
        "version": 1,
        "disable_existing_loggers": True,
        "handlers": {
            "file_handler": {
                "class": "logging.FileHandler",
                "filename": "logfile.log",
            },
        },
        "root": {
            "handlers": ["file_handler"],
            "level": "INFO",
        },
    }
    # uvicorn.run(f'{name_app}:app', host="0.0.0.0", port=3397, reload=False, log_config=log_config)
    uvicorn.run(app, host="0.0.0.0", port=3397, reload=False)
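The /classify/ endpoint takes a JSON array of Features objects and returns {"predictions": [...]}. A minimal client sketch against the port configured above (the field values are illustrative only):

import requests

payload = [{
    "somatization": 1.2, "obsessive_compulsive": 2.5, "interpersonal_sensitivity": 1.8,
    "depression": 2.1, "anxiety": 1.5, "hostility": 1.0, "terror": 1.1,
    "paranoia": 1.3, "psychoticism": 1.2, "other": 1.4,
    "father_parenting_style": 1, "mother_parenting_style": 1,
    "self_assessed_family_economic_condition": 0,
    "history_of_psychological_counseling": False,
    "absenteeism_above_average": False, "academic_warning": True,
}]
resp = requests.post("http://127.0.0.1:3397/classify/", json=payload)
print(resp.json())  # e.g. {"predictions": [2]} -- class labels run 1-4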

Binary file not shown.

@@ -0,0 +1,84 @@ inference.py
"""
文件名: train_gpu_blance_10features.py
训练部分代码
作者: 王春林
创建日期: 2023年10月18日
最后修改日期: 2023年10月20日
版本号: 1.0.0
"""
import torch
from torch import nn

# Define the MLP network (must match the trained architecture)
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(10, 32),   # input layer
            nn.ReLU(),           # activation
            nn.Linear(32, 128),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(128, 32),  # hidden layer
            nn.ReLU(),           # activation
            nn.Linear(32, 4),    # output layer: 4 classes
        )

    def forward(self, x):
        return self.model(x)

# Check whether a GPU is available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
def predict_with_model(data):
    # # Read features and labels
    # data = pd.read_excel('feature_label.xlsx')
    # Feature (column) names expected in `data`
    feature_names = ["强迫症状数字化", "人际关系敏感数字化", "抑郁数字化", "多因子症状", "母亲教养方式数字化", "父亲教养方式数字化", "自评家庭经济条件数字化", "有无心理治疗(咨询)史数字化", "学业情况数字化", "出勤情况数字化"]
    # Per-feature weights
    feature_weights = [0.135, 0.085, 0.08, 0.2, 0.09, 0.09, 0.06, 0.06, 0.08, 0.12]
    # Scale the weights so the largest becomes 1
    max_value = max(feature_weights)
    feature_weights_scaled = [x / max_value for x in feature_weights]
    # print("Scaled Feature Weights:", feature_weights_scaled)
    # Extract the feature matrix
    X = data[feature_names].values
    # Multiply each feature by its weight (applied after normalization)
    for i in range(len(feature_names)):
        X[:, i] = X[:, i] * feature_weights_scaled[i]
    model = MLP().to(device)
    model.load_state_dict(torch.load('psychology.pth', map_location=device))
    model.eval()
    # Convert the input to a tensor on the target device
    X_tensor = torch.from_numpy(X).float().to(device)
    # Inference
    with torch.no_grad():
        outputs = model(X_tensor)
    # Arg-max over the logits gives the predicted class
    _, predictions = torch.max(outputs, 1)
    # Real-world classes start at 1; model classes start at 0
    predictions += 1
    # print("Predictions:", predictions.cpu().numpy())
    return predictions.cpu().numpy().tolist()
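predict_with_model expects a DataFrame whose columns are the ten digitized feature names and needs psychology.pth in the working directory. A minimal local call (the values are illustrative, in the 0-1 range the digitization produces):

import pandas as pd
from inference import predict_with_model

row = pd.DataFrame([{
    "强迫症状数字化": 0.625, "人际关系敏感数字化": 0.45, "抑郁数字化": 0.525,
    "多因子症状": 0.2, "母亲教养方式数字化": 0.69, "父亲教养方式数字化": 0.59,
    "自评家庭经济条件数字化": 0.47, "有无心理治疗(咨询)史数字化": 0.09,
    "学业情况数字化": 0.50, "出勤情况数字化": 0.67,
}])
print(predict_with_model(row))  # e.g. [1] -- class labels run 1-4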

@@ -0,0 +1,64 @@ requirements-psy.txt
# Python 3.10.14
altgraph==0.17.4
annotated-types==0.6.0
anyio==4.3.0
certifi==2024.2.2
charset-normalizer==3.3.2
click==8.1.7
cmake==3.28.4
contourpy==1.2.0
cycler==0.12.1
et-xmlfile==1.1.0
exceptiongroup==1.2.0
fastapi==0.110.0
filelock==3.13.2
fonttools==4.50.0
h11==0.14.0
idna==3.6
Jinja2==3.1.3
joblib==1.3.2
kiwisolver==1.4.5
lit==18.1.2
MarkupSafe==2.1.5
matplotlib==3.7.2
mpmath==1.3.0
networkx==3.2.1
numpy==1.26.4
nvidia-cublas-cu11==11.10.3.66
nvidia-cuda-cupti-cu11==11.7.101
nvidia-cuda-nvrtc-cu11==11.7.99
nvidia-cuda-runtime-cu11==11.7.99
nvidia-cudnn-cu11==8.5.0.96
nvidia-cufft-cu11==10.9.0.58
nvidia-curand-cu11==10.2.10.91
nvidia-cusolver-cu11==11.4.0.1
nvidia-cusparse-cu11==11.7.4.91
nvidia-nccl-cu11==2.14.3
nvidia-nvtx-cu11==11.7.91
openpyxl==3.1.2
packaging==24.0
pandas==2.0.0
pillow==10.2.0
pydantic==2.6.4
pydantic_core==2.16.3
pyinstaller==6.5.0
pyinstaller-hooks-contrib==2024.3
pyparsing==3.0.9
python-dateutil==2.9.0.post0
pytz==2024.1
requests==2.31.0
scikit-learn==1.2.2
scipy==1.12.0
six==1.16.0
sniffio==1.3.1
starlette==0.36.3
sympy==1.12
threadpoolctl==3.4.0
torch==2.0.0
torchaudio==2.0.1
torchvision==0.15.1
triton==2.0.0
typing_extensions==4.10.0
tzdata==2024.1
urllib3==2.2.1
uvicorn==0.29.0

@@ -0,0 +1,43 @@ requirements-psy_cpu.txt
# Python 3.10.14
altgraph==0.17.4
annotated-types==0.6.0
anyio==4.3.0
certifi==2024.2.2
charset-normalizer==3.3.2
click==8.1.7
cycler==0.12.1
et-xmlfile==1.1.0
exceptiongroup==1.2.0
fastapi==0.110.0
filelock==3.13.2
fonttools==4.50.0
h11==0.14.0
idna==3.6
Jinja2==3.1.3
joblib==1.3.2
lit==18.1.2
MarkupSafe==2.1.5
numpy==1.26.4
openpyxl==3.1.2
packaging==24.0
pandas==2.0.0
pillow==10.2.0
pydantic==2.6.4
pydantic_core==2.16.3
pyinstaller==6.5.0
pyinstaller-hooks-contrib==2024.3
pyparsing==3.0.9
python-dateutil==2.9.0.post0
pytz==2024.1
requests==2.31.0
scikit-learn==1.2.2
scipy==1.12.0
six==1.16.0
sniffio==1.3.1
starlette==0.36.3
threadpoolctl==3.4.0
torch==1.11.0+cpu
typing_extensions==4.10.0
tzdata==2024.1
urllib3==2.2.1
uvicorn==0.29.0