improve: use custom loss for the model
parent bf919da1ea
commit 60be2274e4
@@ -5,19 +5,19 @@ import torch
 def main():
     model = CompactPredictor(16).to('cpu', dtype=torch.float32)
-    model.load_state_dict(torch.load('./pred/checkpoints/model_20250315_0504.pt'))
+    model.load_state_dict(torch.load('./pred/checkpoints/model_20250315_0530.pt'))
     model.eval()
     # inference
-    initial = 999917
+    initial = 999269
     last = initial
-    start_time = '2025-03-11 18:43:52'
+    start_time = '2025-03-15 01:03:21'
     for i in range(1, 48):
-        hour = i / 30
+        hour = i / 0.5
         sec = hour * 3600
         time_d = np.log2(sec)
         data = [time_d, np.log2(initial+1), # time_delta, current_views
-                5.231997, 6.473876, 7.063624, 7.026946, 6.9753, 8.599954, 9.448747, 7.236474, 10.881226, 12.128971, 13.351179, # grows_feat
-                0.7798611111, 0.2541666667, 24.778674 # time_feat
+                2.801318, 3.455128, 3.903391, 3.995577, 4.641488, 5.75131, 6.723868, 6.105322, 8.141023, 9.576701, 10.665067, # grows_feat
+                0.043993, 0.72057, 28.000902 # time_feat
         ]
         np_arr = np.array([data])
         tensor = torch.from_numpy(np_arr).to('cpu', dtype=torch.float32)
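Note: the feature row assembled above has 1 + 1 + 11 + 3 = 16 entries (time_delta, current_views, eleven grows_feat values, three time_feat values), matching CompactPredictor(16). A minimal sketch of what presumably happens next, as a hypothetical helper that is not part of this commit: the row is run through the model and the output is decoded back to a raw view count with exp2(...) - 1, mirroring the decoding used in the training hunk below.

import numpy as np
import torch

def predict_views(model, feature_row):
    # Hypothetical helper: run one 16-feature row through the model and
    # undo the log2(views + 1) transform, matching the torch.exp2(...) - 1
    # decoding seen in the training script.
    x = torch.from_numpy(np.array([feature_row], dtype=np.float32))
    with torch.no_grad():
        y = model(x)
    return float(torch.exp2(y)) - 1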
@@ -7,12 +7,38 @@ import torch
 from dataset import VideoPlayDataset, collate_fn
 from pred.model import CompactPredictor
 
+def asymmetricHuberLoss(delta=1.0, beta=1.3):
+    """
+    Create a callable asymmetric Huber loss function.
+
+    Args:
+        delta (float): The delta parameter of the Huber loss.
+        beta (float): Coefficient controlling the penalty on negative errors.
+
+    Returns:
+        callable: The callable loss function.
+    """
+    def loss_function(input, target):
+        error = input - target
+        abs_error = torch.abs(error)
+
+        linear_loss = abs_error - 0.5 * delta
+        quadratic_loss = 0.5 * error**2
+
+        loss = torch.where(abs_error < delta, quadratic_loss, linear_loss)
+        loss = torch.where(error < 0, beta * loss, loss)
+
+        return torch.mean(loss)
+
+    return loss_function
+
 def train(model, dataloader, device, epochs=100):
     writer = SummaryWriter(f'./pred/runs/play_predictor_{time.strftime("%Y%m%d_%H%M")}')
     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=0.01)
     scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3,
                                                     total_steps=len(dataloader)*30)
-    criterion = torch.nn.MSELoss()
+    # Huber loss
+    criterion = asymmetricHuberLoss(delta=1.0, beta=2.1)
 
     model.train()
     global_step = 0
@@ -55,7 +81,7 @@ def train(model, dataloader, device, epochs=100):
                 t = float(torch.exp2(targets[r])) - 1
                 o = float(torch.exp2(outputs[r])) - 1
                 d = features[r].cpu().numpy()[0]
-                speed = np.exp2(features[r].cpu().numpy()[6]) / 6
+                speed = np.exp2(features[r].cpu().numpy()[8]) / 6
                 time_diff = np.exp2(d) / 3600
                 inc = speed * time_diff
                 model_error = abs(t - o)
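For reference, a standalone sanity check of the asymmetric Huber loss introduced above (the factory is copied verbatim from the diff so the snippet runs on its own): with beta > 1, negative errors (the model under-predicting the target) are penalised more heavily than positive errors of the same magnitude, matching the beta=2.1 setting passed in train().

import torch

def asymmetricHuberLoss(delta=1.0, beta=1.3):
    # Same factory as in the diff above.
    def loss_function(input, target):
        error = input - target
        abs_error = torch.abs(error)
        linear_loss = abs_error - 0.5 * delta
        quadratic_loss = 0.5 * error**2
        loss = torch.where(abs_error < delta, quadratic_loss, linear_loss)
        loss = torch.where(error < 0, beta * loss, loss)
        return torch.mean(loss)
    return loss_function

criterion = asymmetricHuberLoss(delta=1.0, beta=2.1)  # settings used in train()
target = torch.tensor([10.0])
print(float(criterion(torch.tensor([8.0]), target)))   # under-prediction: (2 - 0.5) * 2.1 = 3.15
print(float(criterion(torch.tensor([12.0]), target)))  # over-prediction:   2 - 0.5        = 1.5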