
Python Machine Learning: Cat vs. Dog Classification with PyTorch

Published: 2022-06-17
Contents

1. Environment setup
2. Preparing the dataset
3. A cat/dog classification example
4. Running classification predictions
5. References

1. Environment setup

Install Anaconda

For the detailed installation steps, see the linked walkthrough.

Set up PyTorch

pip install -i https://pypi.tuna.tsinghua.edu.cn/simple torch
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple torchvision
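To confirm the installation worked, an optional quick check is to import both packages and ask whether a CUDA-capable GPU is visible:

# Optional sanity check after installation
import torch
import torchvision

print(torch.__version__)           # installed PyTorch version
print(torchvision.__version__)     # installed torchvision version
print(torch.cuda.is_available())   # True if a usable GPU build and driver are present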

1.數(shù)據(jù)集的下載

Kaggle dataset download link: https://www.kaggle.com/lizhensheng/-2000

2.數(shù)據(jù)集的分類

Unzip the downloaded dataset and sort the images into the folder layout below (each split folder contains a cats and a dogs subfolder); a small script for doing this is sketched after the figure.

[Figure: directory layout with train, validation and test folders, each containing cats and dogs subfolders]
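Below is a minimal sketch of one way to build that layout from an unzipped download. The source folder, the cat.N.jpg / dog.N.jpg file-name pattern, and the 1000/500/500 split per class are assumptions for illustration; adjust them to whatever the downloaded archive actually contains.

# Build the train/validation/test folder layout (paths and split sizes are assumptions)
import os
import shutil

src_dir = r'E:\Cat_And_Dog\kaggle\train'                 # hypothetical unzipped folder
dst_root = r'E:\Cat_And_Dog\kaggle\cats_and_dogs_small'  # root of the split layout

splits = {'train': range(0, 1000),
          'validation': range(1000, 1500),
          'test': range(1500, 2000)}

for cls in ('cats', 'dogs'):
    prefix = cls[:-1]  # 'cat' / 'dog' prefix used in the file names
    for split, indices in splits.items():
        dst_dir = os.path.join(dst_root, split, cls)
        os.makedirs(dst_dir, exist_ok=True)
        for i in indices:
            fname = '{}.{}.jpg'.format(prefix, i)
            shutil.copyfile(os.path.join(src_dir, fname), os.path.join(dst_dir, fname))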

3. A cat/dog classification example

Import the required libraries

# Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms

Set the hyperparameters

# Hyperparameters
# Number of images per batch
BATCH_SIZE = 20
# Number of training epochs
EPOCHS = 10
# Compute on the GPU if available, otherwise on the CPU
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

Image preprocessing and augmentation

# Data preprocessing and augmentation
transform = transforms.Compose([
    transforms.Resize(100),
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),
    # RandomResizedCrop(150) scales the crop back up to 150x150,
    # the input size the network below expects
    transforms.RandomResizedCrop(150),
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

Load the dataset and create the data loaders

# Load the data
dataset_train = datasets.ImageFolder(r'E:\Cat_And_Dog\kaggle\cats_and_dogs_small\train', transform)
print(dataset_train.imgs)
# Label assigned to each class folder
print(dataset_train.class_to_idx)
dataset_test = datasets.ImageFolder(r'E:\Cat_And_Dog\kaggle\cats_and_dogs_small\validation', transform)
# Label assigned to each class folder
print(dataset_test.class_to_idx)

# Wrap the datasets in data loaders
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=True)
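To confirm the pipeline produces what the network expects, an optional quick check is to pull one batch from the loader and inspect its shape and labels:

# Optional: inspect a single batch
images, labels = next(iter(train_loader))
print(images.shape)   # expected: torch.Size([20, 3, 150, 150]) with BATCH_SIZE = 20
print(labels[:10])    # 0 = cats, 1 = dogs (ImageFolder assigns indices alphabetically)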

Define the network model

# Define the network
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.max_pool3 = nn.MaxPool2d(2)
        self.conv5 = nn.Conv2d(64, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.max_pool4 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(4608, 512)
        self.fc2 = nn.Linear(512, 1)

    def forward(self, x):
        in_size = x.size(0)
        x = self.conv1(x)
        x = F.relu(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.max_pool2(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.max_pool3(x)
        x = self.conv5(x)
        x = F.relu(x)
        x = self.conv6(x)
        x = F.relu(x)
        x = self.max_pool4(x)
        # Flatten
        x = x.view(in_size, -1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)
        return x

modellr = 1e-4

# Instantiate the model and move it to the GPU (or CPU)
model = ConvNet().to(DEVICE)
# Use the straightforward Adam optimizer with a low learning rate
optimizer = optim.Adam(model.parameters(), lr=modellr)
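Where the 4608 in fc1 comes from: with 150x150 inputs, every 3x3 convolution (no padding) shrinks each spatial dimension by 2 and every MaxPool2d(2) halves it, so the last pooling layer outputs 128 feature maps of size 6x6, and 128 * 6 * 6 = 4608. A quick sketch to confirm the shapes line up:

# Optional shape check with a dummy batch
dummy = torch.zeros(1, 3, 150, 150).to(DEVICE)   # one fake 150x150 RGB image
print(model(dummy).shape)                        # expected: torch.Size([1, 1]), one sigmoid output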

Adjust the learning rate

def adjust_learning_rate(optimizer, epoch):
    '''Sets the learning rate to the initial LR decayed by 10 every 5 epochs'''
    modellrnew = modellr * (0.1 ** (epoch // 5))
    print('lr:', modellrnew)
    for param_group in optimizer.param_groups:
        param_group['lr'] = modellrnew
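Roughly the same schedule (multiply the learning rate by 0.1 every 5 epochs) can also be expressed with PyTorch's built-in scheduler; this is an alternative sketch rather than what the script above uses:

# Alternative: let a StepLR scheduler handle the decay
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
# then call scheduler.step() once at the end of each epoch instead of adjust_learning_rate()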

Define the training and validation procedures

# Training procedure
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device).float().unsqueeze(1)
        optimizer.zero_grad()
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item()))

# Validation procedure
def val(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device).float().unsqueeze(1)
            output = model(data)
            test_loss += F.binary_cross_entropy(output, target, reduction='mean').item()
            pred = torch.tensor([[1] if num[0] >= 0.5 else [0] for num in output]).to(device)
            correct += pred.eq(target.long()).sum().item()
    # Each batch contributed a mean loss, so divide by the number of batches
    test_loss /= len(test_loader)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

Train the model and save it

# Training loop
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, test_loader)

# Save the trained model
torch.save(model, r'E:\Cat_And_Dog\kaggle\model.pth')
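torch.save(model, ...) pickles the whole module object, which ties the checkpoint to the exact class definition used at save time. A common alternative (not what this walkthrough does; the file name below is just an example) is to save only the weights and rebuild the model before loading them:

# Alternative: save only the parameters
torch.save(model.state_dict(), r'E:\Cat_And_Dog\kaggle\model_weights.pth')
# Later:
# model = ConvNet().to(DEVICE)
# model.load_state_dict(torch.load(r'E:\Cat_And_Dog\kaggle\model_weights.pth'))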

Training results

[Screenshot: training and validation output]

4. Running classification predictions

Prepare an image to test the prediction on

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from PIL import Image
from torchvision import transforms

# Define the network (must match the definition used for training)
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.max_pool3 = nn.MaxPool2d(2)
        self.conv5 = nn.Conv2d(64, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.max_pool4 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(4608, 512)
        self.fc2 = nn.Linear(512, 1)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.conv1(x))
        x = self.max_pool1(x)
        x = F.relu(self.conv2(x))
        x = self.max_pool2(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = self.max_pool3(x)
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = self.max_pool4(x)
        # Flatten
        x = x.view(in_size, -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        x = torch.sigmoid(x)
        return x

# Path of the saved model
model_save_path = r'E:\Cat_And_Dog\kaggle\model.pth'

# ------------------------ Load the data --------------------------- #
# Define the inference-time transform (same chain as used for training).
# Note: the random augmentations make each prediction slightly non-deterministic;
# a plain Resize/CenterCrop is more typical at test time.
transform_test = transforms.Compose([
    transforms.Resize(100),
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),
    transforms.RandomResizedCrop(150),
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

# This order matters: it must match the class order used during training
# (ImageFolder assigns indices alphabetically, so cats = 0, dogs = 1)
class_names = ['cat', 'dog']

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# ------------------------ Load the model and predict --------------------------- #
model = torch.load(model_save_path)
model.eval()
# print(model)

image_PIL = Image.open(r'E:\Cat_And_Dog\kaggle\cats_and_dogs_small\test\cats\cat.1500.jpg')

image_tensor = transform_test(image_PIL)
# The following line is equivalent to image_tensor = torch.unsqueeze(image_tensor, 0)
image_tensor.unsqueeze_(0)
# Without this line the forward pass would fail on a GPU model
image_tensor = image_tensor.to(device)

out = model(image_tensor)
pred = torch.tensor([[1] if num[0] >= 0.5 else [0] for num in out]).to(device)
print(class_names[pred.item()])

預(yù)測結(jié)果

[Screenshots: prediction output for the test image]

Judging from the actual training runs, the overall accuracy is not high. Further testing shows that the model only identifies cats reliably, while dogs tend to be misclassified.
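One way to quantify that imbalance is to measure accuracy separately for each class. The sketch below assumes the objects from the training script (model, transform, BATCH_SIZE, DEVICE) are still in scope and that the test folder follows the same cats/dogs layout as the training data:

# Per-class accuracy on the held-out test folder
eval_data = datasets.ImageFolder(r'E:\Cat_And_Dog\kaggle\cats_and_dogs_small\test', transform)
eval_loader = torch.utils.data.DataLoader(eval_data, batch_size=BATCH_SIZE)

per_class_correct = [0, 0]
per_class_total = [0, 0]
model.eval()
with torch.no_grad():
    for data, target in eval_loader:
        # Threshold the sigmoid output at 0.5 to get hard predictions
        preds = (model(data.to(DEVICE)) >= 0.5).long().squeeze(1).cpu()
        for p, t in zip(preds, target):
            per_class_total[int(t)] += 1
            per_class_correct[int(t)] += int(p == t)

for idx, name in enumerate(eval_data.classes):   # ['cats', 'dogs']
    print('{}: {}/{} correct'.format(name, per_class_correct[idx], per_class_total[idx]))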

5. References

Implementing cat-and-dog classification

That concludes this article on implementing cat vs. dog classification with PyTorch. For more on the topic, search 好吧啦網 for related articles.
