diff --git a/filter/train.py b/filter/train.py
index 99566d9..b495aaa 100644
--- a/filter/train.py
+++ b/filter/train.py
@@ -61,7 +61,7 @@
 class_weights = torch.tensor(
     device=device
 )
 
-model = VideoClassifierV6_1().to(device)
+model = VideoClassifierV6_1(num_heads=8).to(device)
 checkpoint_name = './filter/checkpoints/best_model_V6.1.pt'
 # Initialize the tokenizer and embedding model
@@ -75,10 +75,10 @@
 os.makedirs('./filter/checkpoints', exist_ok=True)
 eval_interval = 20
 num_epochs = 20
 total_steps = samples_count * num_epochs / batch_size
-warmup_rate = 0.1
-optimizer = optim.AdamW(model.parameters(), lr=3e-5, weight_decay=1e-3)
+warmup_rate = 0.15
+optimizer = optim.AdamW(model.parameters(), lr=3e-5, weight_decay=1e-4)
 cosine_annealing_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps - int(total_steps * warmup_rate))
-warmup_scheduler = optim.lr_scheduler.LinearLR(optimizer, start_factor=0.8, end_factor=1.0, total_iters=int(total_steps * warmup_rate))
+warmup_scheduler = optim.lr_scheduler.LinearLR(optimizer, start_factor=0.4, end_factor=1.0, total_iters=int(total_steps * warmup_rate))
 scheduler = optim.lr_scheduler.SequentialLR(optimizer, schedulers=[warmup_scheduler, cosine_annealing_scheduler], milestones=[int(total_steps * warmup_rate)])
 criterion = nn.CrossEntropyLoss(weight=class_weights).to(device)
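
For reference, below is a minimal, self-contained sketch of the warmup-plus-cosine schedule these settings produce. It uses a dummy parameter in place of model.parameters() and an illustrative total_steps=100 (the real value comes from samples_count, num_epochs, and batch_size); everything else mirrors the new hyperparameters in the diff. The change to start_factor=0.4 means warmup now ramps from 40% of the base LR instead of 80%, and warmup_rate=0.15 stretches that ramp over a larger share of training.

import torch
import torch.optim as optim

# Dummy parameter standing in for model.parameters(); step count is illustrative.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = optim.AdamW([param], lr=3e-5, weight_decay=1e-4)

total_steps = 100                        # assumed; actually samples_count * num_epochs / batch_size
warmup_steps = int(total_steps * 0.15)   # warmup_rate = 0.15

# LinearLR ramps the LR from 0.4 * base_lr up to base_lr over the warmup steps.
warmup = optim.lr_scheduler.LinearLR(
    optimizer, start_factor=0.4, end_factor=1.0, total_iters=warmup_steps
)
# CosineAnnealingLR then decays the LR over the remaining (post-warmup) steps,
# which is why T_max excludes the warmup portion of the budget.
cosine = optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=total_steps - warmup_steps
)
# SequentialLR hands off from the warmup schedule to the cosine schedule
# at the milestone step.
scheduler = optim.lr_scheduler.SequentialLR(
    optimizer, schedulers=[warmup, cosine], milestones=[warmup_steps]
)

for step in range(total_steps):
    optimizer.step()
    scheduler.step()
    if step % 20 == 0:
        print(step, scheduler.get_last_lr())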