#---Device configuration---#
device : cpu
# device: cuda
#---Training configuration---#
n_epochs : 150
batch_size : 16
learning_rate : 0.001
nc : 4 # number of classes
#data_train: train_val # train: use only the train split for training, val for validation, infer for testing; train_val: train on train + val, validate on infer, test on infer; all: use everything for training, validation, and testing (the first 1/5 of the data is held out as infer, 1/5 of the remainder as val, and the remaining 4/5 as train)
data_train : train_val
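# Worked example of the "all" split above on a hypothetical dataset of 1000 rows (illustration only):
#   infer = 1000 * 1/5         = 200 rows
#   val   = (1000 - 200) * 1/5 = 160 rows
#   train = (1000 - 200) * 4/5 = 640 rows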
early_stop_patience : 50
gamma : 0.98
step_size : 10
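# Note: assuming a StepLR-style scheduler, the learning rate is multiplied by gamma every step_size
# epochs, so over 150 epochs it decays roughly 15 times: 0.001 * 0.98^15 ≈ 0.00074.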
experimental_mode : false
experiments_count : 50
replace_model : true # whether to replace the existing model
#---Detection and inference configuration---#
# Path to the model used for detection and inference
model_path : model/psychology.pth
#---Sample features---#
# Label column name
label_name : 类别 # Chinese for "category"; kept as-is to match the dataset column header
# Feature column names (kept in Chinese to match the dataset column headers)
feature_names :
- "强迫症状数字化" # obsessive-compulsive symptoms (digitized)
- "人际关系敏感数字化" # interpersonal sensitivity (digitized)
- "抑郁数字化" # depression (digitized)
- "多因子症状" # multi-factor symptoms
- "母亲教养方式数字化" # mother's parenting style (digitized)
- "父亲教养方式数字化" # father's parenting style (digitized)
- "自评家庭经济条件数字化" # self-rated family economic condition (digitized)
- "有无心理治疗(咨询)史数字化" # history of psychotherapy/counseling (digitized)
- "学业情况数字化" # academic performance (digitized)
- "出勤情况数字化" # attendance (digitized)
# Feature weight list
feature_weights :
- 0.135
- 0.085
- 0.08
- 0.2
- 0.09
- 0.09
- 0.06
- 0.06
- 0.08
- 0.12
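# Sanity check (derived from the values above): the ten weights presumably pair with feature_names
# by position and sum to 1.0:
#   0.135 + 0.085 + 0.08 + 0.2 + 0.09 + 0.09 + 0.06 + 0.06 + 0.08 + 0.12 = 1.00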
#---Network architecture---#
# MLP configuration
mlp :
input_dim : 10 # Number of input features
layers :
- output_dim : 32
activation : relu
- output_dim : 128
activation : relu
- output_dim : 32
activation : relu
output_dim : 4 # Number of classes
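# Resulting layer sequence, assuming each "layers" entry is a fully connected layer followed by its
# activation (a sketch, not generated from the training code):
#   Linear(10 -> 32)  + ReLU
#   Linear(32 -> 128) + ReLU
#   Linear(128 -> 32) + ReLU
#   Linear(32 -> 4)   -> class logits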
# Transformer configuration
transformer :
d_model : 32 # Reduced embedding dimension
nhead : 4 # Reduced number of attention heads
num_encoder_layers : 2 # Reduced number of encoder layers
num_decoder_layers : 2 # Reduced number of decoder layers
dim_feedforward : 128 # Reduced feedforward network dimension
dropout : 0.1 # Dropout probability
input_dim : 10 # Number of input features
output_dim : 4 # Number of classes
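# Dimension sketch, assuming the 10 input features are first projected to d_model (illustration only):
# each of the 4 attention heads then operates on 32 / 4 = 8 dimensions (d_model must be divisible by
# nhead), the feedforward sublayer expands 32 -> 128 -> 32, and a final projection maps to the 4 classes.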
#---Training configuration backups---#
# MLP: good training parameters (set 1)
# #---Training configuration---#
# n_epochs: 150
# batch_size: 16
# learning_rate: 0.001
# nc: 4
# data_train: train_val
# early_stop_patience: 50
# gamma: 0.98
# step_size: 10
# experiments_count: 1
# MLP: good training parameters (set 2)
# #---Training configuration---#
# n_epochs: 300
# batch_size: 8
# learning_rate: 0.0005
# nc: 4
# data_train: train_val
# early_stop_patience: 50
# gamma: 0.98
# step_size: 10
# experiments_count: 1
# Transformer: good training parameters (set 1)
# #---Training configuration---#
# n_epochs: 150
# batch_size: 64
# learning_rate: 0.001
# nc: 4
# data_train: train_val
# early_stop_patience: 50
# gamma: 0.98
# step_size: 10
# experiments_count: 1
# Transformer: good training parameters (set 2)
# #---Training configuration---#
# n_epochs: 300
# batch_size: 8
# learning_rate: 0.0005
# nc: 4
# data_train: train_val
# early_stop_patience: 50
# gamma: 0.98
# step_size: 10
# experiments_count: 1