From 0ed59f60d01633ddff0f9cc38c773a6be060389f Mon Sep 17 00:00:00 2001
From: alikia2x
Date: Fri, 7 Mar 2025 01:31:18 +0800
Subject: [PATCH] add: the V6.1 filter model

---
 filter/embedding.py |  82 +++++++++------------------
 filter/modelV6_0.py |  31 ++-----------
 filter/modelV6_1.py | 108 ++++++++++++++++++++++++++++++++++++++++++++
 filter/train.py     |  45 ++++++++++--------
 4 files changed, 160 insertions(+), 106 deletions(-)
 create mode 100644 filter/modelV6_1.py

diff --git a/filter/embedding.py b/filter/embedding.py
index ccecc9a..8390337 100644
--- a/filter/embedding.py
+++ b/filter/embedding.py
@@ -31,10 +31,8 @@ def prepare_batch(batch_data, device="cpu"):
     return batch_tensor
 
 import onnxruntime as ort
-from transformers import AutoTokenizer
-from itertools import accumulate
 
-def prepare_batch_per_token(batch_data, max_length=1024):
+def prepare_batch_per_token(session, tokenizer, batch_data, max_length=1024):
     """
     Convert the input batch_data into the model input format [batch_size, num_channels, seq_length, embedding_dim].
 
@@ -42,69 +40,33 @@
         batch_data (dict): input batch, shaped as {
             "title": [text1, text2, ...],
             "description": [text1, text2, ...],
-            "tags": [text1, text2, ...],
-            "author_info": [text1, text2, ...]
+            "tags": [text1, text2, ...]
         }
         max_length (int): maximum sequence length.
 
     Returns:
-        torch.Tensor: a tensor of shape [batch_size, num_channels, seq_length, embedding_dim].
+        torch.Tensor: a tensor of shape [batch_size, num_channels, max_length, embedding_dim].
     """
-    # Initialize the tokenizer and the ONNX model
-    tokenizer = AutoTokenizer.from_pretrained("alikia2x/jina-embedding-v3-m2v-1024")
-    session = ort.InferenceSession("./model/embedding_256/onnx/model.onnx")
-    # 1. Encode the texts of each channel separately
-    channel_embeddings = []
-    for channel in ["title", "description", "tags", "author_info"]:
-        texts = batch_data[channel]  # texts of the current channel
+    batch_size = len(batch_data["title"])
+    batch_tensor = torch.zeros(batch_size, 3, max_length, 256)
+    for i in range(batch_size):
+        channel_embeddings = torch.zeros((3, max_length, 256))
+        for j, channel in enumerate(["title", "description", "tags"]):
+            # Text of the current channel for this sample
+            text = batch_data[channel][i]
+            encoded_inputs = tokenizer(text, truncation=True, max_length=max_length, return_tensors='np')
 
-        # Step 1: generate input_ids and offsets
-        # Encode each text separately, keeping its original token length
-        encoded_inputs = [tokenizer(text, truncation=True, max_length=max_length, return_tensors='np') for text in texts]
+            # embeddings: [max_length, embedding_dim]
+            embeddings = torch.zeros((max_length, 256))
+            for idx, token in enumerate(encoded_inputs['input_ids'][0]):
+                inputs = {
+                    "input_ids": ort.OrtValue.ortvalue_from_numpy(np.array([token])),
+                    "offsets": ort.OrtValue.ortvalue_from_numpy(np.array([0], dtype=np.int64))
+                }
+                output = session.run(None, inputs)[0]
+                embeddings[idx] = torch.from_numpy(output)
+            channel_embeddings[j] = embeddings
+        batch_tensor[i] = channel_embeddings
 
-        # Extract the input_ids length of each text (the actual token count)
-        input_ids_lengths = [len(enc["input_ids"][0]) for enc in encoded_inputs]
-
-        # Generate offsets: [0, len1, len1+len2, ...]
-        offsets = list(accumulate([0] + input_ids_lengths[:-1]))  # cumulative sum, excluding the last length
-
-        # Flatten all input_ids into a one-dimensional array
-        flattened_input_ids = np.concatenate([enc["input_ids"][0] for enc in encoded_inputs], axis=0).astype(np.int64)
-
-        # Step 2: build the ONNX inputs
-        inputs = {
-            "input_ids": ort.OrtValue.ortvalue_from_numpy(flattened_input_ids),
-            "offsets": ort.OrtValue.ortvalue_from_numpy(np.array(offsets, dtype=np.int64))
-        }
-
-        # Step 3: run the ONNX model
-        embeddings = session.run(None, inputs)[0]  # assumes the output is named "embeddings"
-
-        # Step 4: reshape the output to [batch_size, seq_length, embedding_dim]
-        # Note: this assumes the ONNX output has shape [total_tokens, embedding_dim]
-        # and must be regrouped by the actual sequence lengths
-        batch_size = len(texts)
-        embeddings_split = np.split(embeddings, np.cumsum(input_ids_lengths[:-1]))
-        padded_embeddings = []
-        for emb, seq_len in zip(embeddings_split, input_ids_lengths):
-            # Pad or truncate each sequence to max_length
-            if seq_len > max_length:
-                # Truncate if the sequence exceeds max_length
-                emb = emb[:max_length]
-                pad_length = 0
-            else:
-                # Otherwise pad up to max_length
-                pad_length = max_length - seq_len
-
-            # Pad to [max_length, embedding_dim]
-            padded = np.pad(emb, ((0, pad_length), (0, 0)), mode='constant')
-            padded_embeddings.append(padded)
-
-        # Ensure all padded sequences share the same shape
-        embeddings_tensor = torch.tensor(np.stack(padded_embeddings), dtype=torch.float32)
-        channel_embeddings.append(embeddings_tensor)
-
-    # 2. Stack the encoded results into [batch_size, num_channels, seq_length, embedding_dim]
-    batch_tensor = torch.stack(channel_embeddings, dim=1)
     return batch_tensor
\ No newline at end of file
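The rewritten prepare_batch_per_token issues one session.run call per token, so a text of n tokens costs n ONNX round-trips. The new code already relies on the model pooling one bag per entry of offsets (a single token with offsets=[0] returns that token's vector); if that holds for multiple offsets as well, which this patch does not verify, the whole text can be embedded in a single call. A minimal sketch; embed_tokens_batched is a hypothetical helper, not part of the patch:

    import numpy as np
    import torch

    def embed_tokens_batched(session, token_ids, max_length=1024, embedding_dim=256):
        # Assumption: with offsets [0, 1, ..., n-1], each bag holds exactly one
        # token, so the EmbeddingBag-style model returns one embedding per token.
        token_ids = np.asarray(token_ids, dtype=np.int64)[:max_length]
        inputs = {
            "input_ids": token_ids,                                # [n]
            "offsets": np.arange(len(token_ids), dtype=np.int64),  # one bag per token
        }
        output = session.run(None, inputs)[0]                      # [n, embedding_dim]
        embeddings = torch.zeros((max_length, embedding_dim))
        embeddings[:len(token_ids)] = torch.from_numpy(output)
        return embeddings

This keeps the padded [max_length, embedding_dim] layout that batch_tensor expects while replacing n round-trips with one.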
diff --git a/filter/modelV6_0.py b/filter/modelV6_0.py
index 32502fa..227c3bd 100644
--- a/filter/modelV6_0.py
+++ b/filter/modelV6_0.py
@@ -5,8 +5,8 @@ import torch.nn.functional as F
 class VideoClassifierV6_0(nn.Module):
     def __init__(self, embedding_dim=256, seq_length=1024, hidden_dim=512, output_dim=3):
         super().__init__()
-        self.num_channels = 4
-        self.channel_names = ['title', 'description', 'tags', 'author_info']
+        self.num_channels = 3
+        self.channel_names = ['title', 'description', 'tags']
 
         # CNN feature extraction layers
         self.conv_layers = nn.Sequential(
@@ -65,29 +65,4 @@ class VideoClassifierV6_0(nn.Module):
         flat_features = conv_features.view(conv_features.size(0), -1)  # [batch_size, 256]
 
         # Fully connected classifier
-        return self.fc(flat_features)
-
-# The loss function stays unchanged
-class AdaptiveRecallLoss(nn.Module):
-    def __init__(self, class_weights, alpha=0.8, gamma=2.0, fp_penalty=0.5):
-        super().__init__()
-        self.class_weights = class_weights
-        self.alpha = alpha
-        self.gamma = gamma
-        self.fp_penalty = fp_penalty
-
-    def forward(self, logits, targets):
-        ce_loss = F.cross_entropy(logits, targets, weight=self.class_weights, reduction='none')
-        pt = torch.exp(-ce_loss)
-        focal_loss = ((1 - pt) ** self.gamma) * ce_loss
-
-        class_mask = F.one_hot(targets, num_classes=len(self.class_weights))
-        class_weights = (self.alpha + (1 - self.alpha) * pt.unsqueeze(-1)) * class_mask
-        recall_loss = (class_weights * focal_loss.unsqueeze(-1)).sum(dim=1)
-
-        probs = F.softmax(logits, dim=1)
-        fp_mask = (targets != 0) & (torch.argmax(logits, dim=1) == 0)
-        fp_loss = self.fp_penalty * probs[:, 0][fp_mask].pow(2).sum()
-
-        total_loss = recall_loss.mean() + fp_loss / len(targets)
-        return total_loss
\ No newline at end of file
+        return self.fc(flat_features)
\ No newline at end of file
diff --git a/filter/modelV6_1.py b/filter/modelV6_1.py
new file mode 100644
index 0000000..7b9527a
--- /dev/null
+++ b/filter/modelV6_1.py
@@ -0,0 +1,108 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class VideoClassifierV6_1(nn.Module):
+    def __init__(self, embedding_dim=256, seq_length=1024, hidden_dim=256, output_dim=3, num_heads=4):
+        super().__init__()
+        self.num_channels = 3
+        self.channel_names = ['title', 'description', 'tags']
+        self.embedding_dim = embedding_dim
+        self.hidden_dim = hidden_dim  # feature dim of each channel after processing
+
+        # Per-channel processing modules (an independent Transformer-style encoder per channel)
+        self.channel_processors = nn.ModuleList()
+        for _ in range(self.num_channels):
+            self.channel_processors.append(
+                nn.Sequential(
+                    # Self-attention layer
+                    nn.MultiheadAttention(
+                        embed_dim=embedding_dim,
+                        num_heads=num_heads,
+                        dropout=0.1
+                    ),
+                    # Layer norm and feed-forward network
+                    nn.LayerNorm(embedding_dim),
+                    nn.Linear(embedding_dim, hidden_dim),
+                    nn.GELU(),
+                    nn.Linear(hidden_dim, hidden_dim),
+                    nn.LayerNorm(hidden_dim)
+                )
+            )
+
+        # Channel weights (learnable, sigmoid-constrained)
+        self.channel_weights = nn.Parameter(torch.ones(self.num_channels))
+
+        # Fully connected head (widened)
+        self.fc = nn.Sequential(
+            nn.Linear(self.num_channels * hidden_dim, 1024),  # dim of the concatenated features
+            nn.BatchNorm1d(1024),
+            nn.Dropout(0.2),
+            nn.GELU(),
+            nn.Linear(1024, 512),
+            nn.BatchNorm1d(512),
+            nn.Dropout(0.2),
+            nn.GELU(),
+            nn.Linear(512, output_dim)
+        )
+
+        self._init_weights()
+
+    def _init_weights(self):
+        """Weight initialization (Xavier)."""
+        for m in self.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.xavier_uniform_(m.weight)
+                if m.bias is not None:
+                    nn.init.zeros_(m.bias)
+            elif isinstance(m, nn.MultiheadAttention):
+                # Initialize the MultiheadAttention parameters (input and output projections)
+                for name, param in m.named_parameters():
+                    if "in_proj" in name or "out_proj" in name:
+                        if "weight" in name:
+                            nn.init.xavier_uniform_(param)
+                        elif "bias" in name:
+                            nn.init.zeros_(param)
+            elif isinstance(m, nn.LayerNorm):
+                nn.init.ones_(m.weight)
+
+    def forward(self, channel_features: torch.Tensor):
+        """
+        Input shape:  [batch_size, num_channels, seq_length, embedding_dim]
+        Output shape: [batch_size, output_dim]
+        """
+        batch_size = channel_features.size(0)
+        processed_channels = []
+
+        for c in range(self.num_channels):
+            # Features of the current channel [B, S, E]
+            c_data = channel_features[:, c]
+            # Transpose to [S, B, E] for MultiheadAttention
+            c_data = c_data.permute(1, 0, 2)
+
+            # Independent per-channel processing; the nn.Sequential is iterated
+            # manually because nn.MultiheadAttention takes three arguments
+            x = c_data
+            for layer in self.channel_processors[c]:
+                if isinstance(layer, nn.MultiheadAttention):
+                    # Self-attention: query, key and value are all x
+                    x = layer(x, x, x)[0]
+                else:
+                    x = layer(x)
+            # Back to [B, S, hidden_dim]
+            x = x.permute(1, 0, 2)
+            # Global pooling (mean over the sequence dimension)
+            pooled = x.mean(dim=1)  # [B, hidden_dim]
+            processed_channels.append(pooled)
+
+        # Stack channel features [B, C, hidden_dim]
+        processed_channels = torch.stack(processed_channels, dim=1)
+
+        # Apply the channel weights (sigmoid-constrained)
+        weights = torch.sigmoid(self.channel_weights).unsqueeze(0).unsqueeze(-1)  # [1, C, 1]
+        weighted_features = processed_channels * weights  # [B, C, hidden_dim]
+
+        # Concatenate all channel features
+        combined = weighted_features.view(batch_size, -1)  # [B, C*hidden_dim]
+
+        # Classifier head
+        return self.fc(combined)
\ No newline at end of file
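A quick shape check helps confirm the per-channel attention path wires up end to end; a minimal sketch, using random tensors in place of real embeddings:

    import torch
    from filter.modelV6_1 import VideoClassifierV6_1

    model = VideoClassifierV6_1()
    model.eval()  # eval mode sidesteps BatchNorm batch-statistics issues on tiny batches
    # prepare_batch_per_token produces [batch_size, num_channels, max_length, embedding_dim]
    dummy = torch.randn(2, 3, 1024, 256)
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # torch.Size([2, 3])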
diff --git a/filter/train.py b/filter/train.py
index dca219f..3528cda 100644
--- a/filter/train.py
+++ b/filter/train.py
@@ -4,14 +4,16 @@ import numpy as np
 from torch.utils.data import DataLoader
 import torch.optim as optim
 from dataset import MultiChannelDataset
-from filter.modelV3_15 import AdaptiveRecallLoss, VideoClassifierV3_15
+from filter.modelV6_1 import VideoClassifierV6_1
 from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score, classification_report
 import os
 import torch
 from torch.utils.tensorboard import SummaryWriter
 import time
-from embedding import prepare_batch
-import torch.nn as nn
+from embedding import prepare_batch_per_token
+import onnxruntime as ort
+from transformers import AutoTokenizer
+from torch import nn
 
 run_name = f"run_{time.strftime('%Y%m%d_%H%M')}"
@@ -24,6 +26,8 @@ writer = SummaryWriter(log_dir=log_dir)
 train_dataset = MultiChannelDataset('./data/filter/labeled_data.jsonl', mode='train')
 eval_dataset = MultiChannelDataset('./data/filter/labeled_data.jsonl', mode='eval')
 
+samples_count = len(train_dataset)
+
 # Load the test dataset
 test_file = './data/filter/test.jsonl'
 if not os.path.exists(test_file):
@@ -50,21 +54,26 @@ class_weights = torch.tensor(
     device='cpu'
 )
 
-# Initialize the model and the SentenceTransformer
-model = VideoClassifierV3_15()
-checkpoint_name = './filter/checkpoints/best_model_V3.17.pt'
+model = VideoClassifierV6_1()
+checkpoint_name = './filter/checkpoints/best_model_V6.2-test2.pt'
+
+# Initialize the tokenizer and the embedding model
+tokenizer = AutoTokenizer.from_pretrained("alikia2x/jina-embedding-v3-m2v-1024")
+session = ort.InferenceSession("./model/embedding_256/onnx/model.onnx")
 
 # Checkpoint directory
 os.makedirs('./filter/checkpoints', exist_ok=True)
 
 # Optimizer
-optimizer = optim.AdamW(model.parameters(), lr=4e-4)
-criterion = AdaptiveRecallLoss(
-    class_weights=class_weights,
-    alpha=0.9,        # recall weight
-    gamma=1.6,        # hard-example focus
-    fp_penalty=0.8    # false-positive penalty strength
-)
+eval_interval = 20
+num_epochs = 20
+total_steps = samples_count * num_epochs // train_loader.batch_size
+warmup_rate = 0.1
+optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-5)
+cosine_annealing_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps - int(total_steps * warmup_rate))
+warmup_scheduler = optim.lr_scheduler.LinearLR(optimizer, start_factor=0.1, end_factor=1.0, total_iters=int(total_steps * warmup_rate))
+scheduler = optim.lr_scheduler.SequentialLR(optimizer, schedulers=[warmup_scheduler, cosine_annealing_scheduler], milestones=[int(total_steps * warmup_rate)])
+criterion = nn.CrossEntropyLoss(weight=class_weights)
 
 def count_trainable_parameters(model):
     return sum(p.numel() for p in model.parameters() if p.requires_grad)
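The warmup-then-cosine composition can be sanity-checked in isolation; a minimal sketch with a throwaway parameter and illustrative step counts (not the training configuration above):

    import torch
    import torch.optim as optim

    params = [torch.nn.Parameter(torch.zeros(1))]
    opt = optim.AdamW(params, lr=1e-4)
    total_steps, warmup_steps = 1000, 100
    warmup = optim.lr_scheduler.LinearLR(opt, start_factor=0.1, end_factor=1.0, total_iters=warmup_steps)
    cosine = optim.lr_scheduler.CosineAnnealingLR(opt, T_max=total_steps - warmup_steps)
    sched = optim.lr_scheduler.SequentialLR(opt, schedulers=[warmup, cosine], milestones=[warmup_steps])
    for s in range(total_steps):
        opt.step()
        sched.step()
        if s in (0, warmup_steps - 1, warmup_steps, total_steps - 1):
            print(s, sched.get_last_lr()[0])
    # The LR ramps from 1e-5 to 1e-4 over the first 100 steps, then decays toward 0.

Since T_max and the milestone are counted in optimizer steps, scheduler.step() belongs once per batch, which matches how total_steps is derived from samples_count and the batch size in the hunk above.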
@@ -76,7 +85,7 @@ def evaluate(model, dataloader):
 
     with torch.no_grad():
         for batch in dataloader:
-            batch_tensor = prepare_batch(batch['texts'])
+            batch_tensor = prepare_batch_per_token(session, tokenizer, batch['texts'])
             logits = model(batch_tensor)
             preds = torch.argmax(logits, dim=1)
             all_preds.extend(preds.cpu().numpy())
@@ -98,8 +107,6 @@ print(f"Trainable parameters: {count_trainable_parameters(model)}")
 # Training loop
 best_f1 = 0
 step = 0
-eval_interval = 20
-num_epochs = 8
 
 for epoch in range(num_epochs):
     model.train()
@@ -108,8 +115,9 @@ for epoch in range(num_epochs):
     # Training phase
     for batch_idx, batch in enumerate(train_loader):
         optimizer.zero_grad()
+
-        batch_tensor = prepare_batch(batch['texts'])
+        batch_tensor = prepare_batch_per_token(session, tokenizer, batch['texts'])
 
         logits = model(batch_tensor)
 
@@ -142,7 +150,8 @@ for epoch in range(num_epochs):
             best_f1 = eval_f1
             torch.save(model.state_dict(), checkpoint_name)
             print("  Saved best model")
-        print("Channel weights: ", model.get_channel_weights())
+        scheduler.step()
+        writer.add_scalar('Train/LR', scheduler.get_last_lr()[0], step)
 
     # Log the average training loss of each epoch
     avg_epoch_loss = epoch_loss / len(train_loader)
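For later inference, the saved state_dict can be restored into a freshly constructed model; a minimal sketch assuming the default constructor arguments used above:

    import torch
    from filter.modelV6_1 import VideoClassifierV6_1

    model = VideoClassifierV6_1()
    state = torch.load('./filter/checkpoints/best_model_V6.2-test2.pt', map_location='cpu')
    model.load_state_dict(state)
    model.eval()
    with torch.no_grad():
        # a real batch would come from prepare_batch_per_token(session, tokenizer, batch['texts'])
        example = torch.randn(1, 3, 1024, 256)  # placeholder input
        print(torch.argmax(model(example), dim=1))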