update: filter to v5

This commit is contained in:
alikia2x (寒寒) 2025-01-25 01:56:08 +08:00
parent 501428ab47
commit 6c5dfaae8b
Signed by: alikia2x
GPG Key ID: 56209E0CCD8420C6
3 changed files with 106 additions and 20 deletions

15
filter/RunningLogs.txt Normal file
View File

@@ -0,0 +1,15 @@
Note
2028: V1
2233: V2
2255: V1
2319: V2.5-test2 # V2.x效果太差代码已删除
0006: V1.5-test3 # test3代表测试集第3个版本
0011: V3-test3
0043: V1.5-test3
0048: V3.1-test3
0056: V3.2-test3
0116: V4-test3 # V4 尝试删除了author_info 通道
0125: V4.1-test3
0133: V4.2-test3
0138: V4.3-test3
0155: V5-test3 # V4 的效果也不是特别好

62
filter/modelV5.py Normal file
View File

@@ -0,0 +1,62 @@
import torch
import torch.nn as nn
class VideoClassifierV5(nn.Module):
    """Multi-channel video-metadata classifier (V5).

    Encodes four text channels (title, description, tags, author_info) with a
    frozen SentenceTransformer, applies a temperature-scaled softmax weighting
    over the channels, and classifies the concatenated weighted features with
    a fully-connected head.
    """

    def __init__(self, embedding_dim=1024, hidden_dim=640, output_dim=3):
        super().__init__()
        self.num_channels = 4
        self.channel_names = ['title', 'description', 'tags', 'author_info']

        # Improvement 1: learnable channel weights with a temperature
        # coefficient — more flexible than the fixed weights of earlier versions.
        self.channel_weights = nn.Parameter(torch.ones(self.num_channels))
        self.temperature = 1.4  # smoothing coefficient; tunable via set_temperature()

        # Improvement 2: a more robust fully-connected head.
        self.fc = nn.Sequential(
            nn.Linear(embedding_dim * self.num_channels, hidden_dim * 2),
            nn.BatchNorm1d(hidden_dim * 2),
            nn.Dropout(0.1),
            nn.ReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.Linear(hidden_dim, output_dim)
        )

        # Improvement 3: explicit initialization of the output layer.
        nn.init.xavier_uniform_(self.fc[-1].weight)
        nn.init.zeros_(self.fc[-1].bias)

    def _normalized_weights(self):
        """Temperature-scaled softmax over the learnable channel weights.

        Shared by forward() and get_channel_weights() so the weighting scheme
        is defined in exactly one place.
        """
        return torch.softmax(self.channel_weights / self.temperature, dim=0)

    def forward(self, input_texts, sentence_transformer):
        """Classify a batch of multi-channel texts.

        Args:
            input_texts: dict mapping each channel name in ``self.channel_names``
                to a list of strings (one per sample; all channels must have the
                same batch size for the later stack to succeed).
            sentence_transformer: encoder with an ``encode(texts, task=...)``
                method returning one embedding vector per input text; kept
                frozen (no gradients flow through it).

        Returns:
            Tensor of logits with shape [batch, output_dim].
        """
        # Flatten all channel texts into one list for a single batched encode.
        all_texts = [text for channel in self.channel_names for text in input_texts[channel]]

        # Generate embeddings with the SentenceTransformer (kept frozen).
        with torch.no_grad():
            task = "classification"
            embeddings = torch.tensor(
                sentence_transformer.encode(all_texts, task=task),
                device=next(self.parameters()).device
            )

        # Split embeddings back per channel and stack: [batch, 4, embedding_dim].
        split_sizes = [len(input_texts[name]) for name in self.channel_names]
        channel_features = torch.split(embeddings, split_sizes, dim=0)
        channel_features = torch.stack(channel_features, dim=1)

        # Improvement 4: temperature-scaled softmax channel weighting.
        weights = self._normalized_weights()
        weighted_features = channel_features * weights.unsqueeze(0).unsqueeze(-1)

        # Concatenate the weighted channel features and classify.
        combined = weighted_features.view(weighted_features.size(0), -1)
        return self.fc(combined)

    def get_channel_weights(self):
        """Return the current (temperature-adjusted) channel weights as a numpy array."""
        return self._normalized_weights().detach().cpu().numpy()

    def set_temperature(self, temperature):
        """Set the softmax temperature (used e.g. for annealing during training)."""
        self.temperature = temperature

View File

@@ -3,7 +3,7 @@ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"]="1"
from torch.utils.data import DataLoader
import torch.optim as optim
from dataset import MultiChannelDataset
from modelV3_2 import VideoClassifierV3_2
from modelV5 import VideoClassifierV5
from sentence_transformers import SentenceTransformer
import torch.nn as nn
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, classification_report
@@ -39,8 +39,8 @@ test_loader = DataLoader(test_dataset, batch_size=24, shuffle=False)
# 初始化模型和SentenceTransformer
sentence_transformer = SentenceTransformer("Thaweewat/jina-embedding-v3-m2v-1024")
model = VideoClassifierV3_2()
checkpoint_name = './filter/checkpoints/best_model_V3.2.pt'
model = VideoClassifierV5()
checkpoint_name = './filter/checkpoints/best_model_V5.pt'
# 模型保存路径
os.makedirs('./filter/checkpoints', exist_ok=True)
@@ -80,15 +80,23 @@ print(f"Trainable parameters: {count_trainable_parameters(model)}")
# 训练循环
best_f1 = 0
total_step = 0
step = 0
eval_interval = 50
num_epochs = 8
for epoch in range(8):
total_steps = num_epochs * len(train_loader) # 总训练步数
T_max = 1.4 # 初始温度
T_min = 0.15 # 最终温度
for epoch in range(num_epochs):
model.train()
epoch_loss = 0
# 训练阶段
for batch_idx, batch in enumerate(train_loader):
temperature = T_max - (T_max - T_min) * (step / total_steps)
model.set_temperature(temperature)
optimizer.zero_grad()
# 传入文本字典和sentence_transformer
@@ -100,18 +108,18 @@ for epoch in range(8):
epoch_loss += loss.item()
# 记录训练损失
writer.add_scalar('Train/Loss', loss.item(), total_step)
total_step += 1
writer.add_scalar('Train/Loss', loss.item(), step)
step += 1
# 每隔 eval_interval 步执行验证
if total_step % eval_interval == 0:
if step % eval_interval == 0:
eval_f1, eval_recall, eval_precision, eval_accuracy, eval_class_report = evaluate(model, eval_loader)
writer.add_scalar('Eval/F1', eval_f1, total_step)
writer.add_scalar('Eval/Recall', eval_recall, total_step)
writer.add_scalar('Eval/Precision', eval_precision, total_step)
writer.add_scalar('Eval/Accuracy', eval_accuracy, total_step)
writer.add_scalar('Eval/F1', eval_f1, step)
writer.add_scalar('Eval/Recall', eval_recall, step)
writer.add_scalar('Eval/Precision', eval_precision, step)
writer.add_scalar('Eval/Accuracy', eval_accuracy, step)
print(f"Step {total_step}")
print(f"Step {step}")
print(f" Eval F1: {eval_f1:.4f} | Eval Recall: {eval_recall:.4f} | Eval Precision: {eval_precision:.4f} | Eval Accuracy: {eval_accuracy:.4f}")
print(" Eval Class Report:")
for cls, metrics in eval_class_report.items():
@@ -123,6 +131,7 @@ for epoch in range(8):
best_f1 = eval_f1
torch.save(model.state_dict(), checkpoint_name)
print(" Saved best model")
print("Channel weights: ", model.get_channel_weights())
# 记录每个 epoch 的平均训练损失
avg_epoch_loss = epoch_loss / len(train_loader)
@@ -159,17 +168,17 @@ for epoch in range(8):
print("\nTesting...")
model.load_state_dict(torch.load(checkpoint_name))
test_f1, test_recall, test_precision, test_accuracy, test_class_report = evaluate(model, test_loader)
writer.add_scalar('Test/F1', test_f1, total_step)
writer.add_scalar('Test/Recall', test_recall, total_step)
writer.add_scalar('Test/Precision', test_precision, total_step)
writer.add_scalar('Test/Accuracy', test_accuracy, total_step)
writer.add_scalar('Test/F1', test_f1, step)
writer.add_scalar('Test/Recall', test_recall, step)
writer.add_scalar('Test/Precision', test_precision, step)
writer.add_scalar('Test/Accuracy', test_accuracy, step)
print(f"Test F1: {test_f1:.4f} | Test Recall: {test_recall:.4f} | Test Precision: {test_precision:.4f} | Test Accuracy: {test_accuracy:.4f}")
print(" Test Class Report:")
for cls, metrics in test_class_report.items():
if cls.isdigit(): # 只打印类别的指标
print(f" Class {cls}: Precision: {metrics['precision']:.4f}, Recall: {metrics['recall']:.4f}, F1: {metrics['f1-score']:.4f}, Support: {metrics['support']}")
writer.add_scalar(f'Test/Class_{cls}_Precision', metrics['precision'], total_step)
writer.add_scalar(f'Test/Class_{cls}_Recall', metrics['recall'], total_step)
writer.add_scalar(f'Test/Class_{cls}_F1', metrics['f1-score'], total_step)
writer.add_scalar(f'Test/Class_{cls}_Precision', metrics['precision'], step)
writer.add_scalar(f'Test/Class_{cls}_Recall', metrics['recall'], step)
writer.add_scalar(f'Test/Class_{cls}_F1', metrics['f1-score'], step)
# 关闭 TensorBoard
writer.close()