import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm

from consts import TRAIN_DATA
from dogs_cats_ds import DogCatDataset
from model import DogCatClassifier


def train(model: nn.Module, train_loader: DataLoader, criterion, optimizer, device, epochs):
    model.to(device)
    model.train()

    for epoch in tqdm(range(epochs)):
        running_loss = 0.0
        correct = 0
        total = 0

        for i, (img, lab) in enumerate(train_loader):
            # BCELoss expects float targets with the same shape as the model output: (batch, 1).
            img, lab = img.to(device), lab.to(device).float().view(-1, 1)

            optimizer.zero_grad()
            out = model(img)
            loss = criterion(out, lab)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            pred = (out > 0.5).float()  # threshold the sigmoid probabilities at 0.5
            total += lab.size(0)
            correct += (pred == lab).sum().item()

            # Log every 50 mini-batches, then reset the running statistics.
            if (i + 1) % 50 == 0:
                print(
                    f'Epoch [{epoch + 1}/{epochs}], '
                    f'Batch [{i + 1}/{len(train_loader)}], '
                    f'Loss: {running_loss / 50:.4f}, '
                    f'Accuracy: {100 * correct / total:.2f}%'
                )
                running_loss = 0.0
                total = 0
                correct = 0


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    dog_train_dataset = DogCatDataset(TRAIN_DATA)
    # Shuffling is fine here since this is the training split.
    dog_train_loader = DataLoader(dog_train_dataset, batch_size=32, shuffle=True)

    model = DogCatClassifier()
    criterion = nn.BCELoss()  # expects sigmoid outputs in [0, 1]
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    print(model)

    train(model=model, train_loader=dog_train_loader, criterion=criterion,
          optimizer=optimizer, device=device, epochs=10)

    torch.save(model.state_dict(), 'dog_cat_classifier.pth')
    print('Training complete, model saved to dog_cat_classifier.pth')
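

# --- Hedged usage sketch (not part of the original training flow) ---
# A minimal evaluation helper for the saved checkpoint, assuming the model
# emits sigmoid probabilities (consistent with BCELoss and the 0.5 threshold
# above). VAL_DATA is a hypothetical constant for a held-out split; substitute
# your own path.
@torch.no_grad()
def evaluate(model: nn.Module, loader: DataLoader, device) -> float:
    model.to(device)
    model.eval()
    correct, total = 0, 0
    for img, lab in loader:
        img, lab = img.to(device), lab.to(device).float().view(-1, 1)
        pred = (model(img) > 0.5).float()
        correct += (pred == lab).sum().item()
        total += lab.size(0)
    return 100.0 * correct / total

# Example usage (assumes DogCatDataset can be built from a validation folder):
#   val_loader = DataLoader(DogCatDataset(VAL_DATA), batch_size=32, shuffle=False)
#   model.load_state_dict(torch.load('dog_cat_classifier.pth', map_location=device))
#   print(f'Validation accuracy: {evaluate(model, val_loader, device):.2f}%')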