LitchiCheng 发表于 2024-10-21 21:29

一起读《动手学深度学习(PyTorch版)》- 线性神经网络 - softmax两种实现对比

<div class='showpostmsg'><p>手动实现sgd、train等过程</p>

<pre>
<code>import torch
import torchvision
from torch.utils import data
from torchvision import transforms
import matplotlib.pyplot as plt

# Download Fashion-MNIST once and expose train/test splits as tensor datasets.
trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(root = "./data", train=True, transform=trans, download=True)
# BUG FIX: the test split must be loaded with train=False (was train=True).
mnist_test = torchvision.datasets.FashionMNIST(root = "./data", train=False, transform=trans, download=True)

# print(len(mnist_train), len(mnist_test))
# BUG FIX: a Dataset object has no .shape; the raw image tensor does.
print(mnist_train.data.shape)

def get_fasion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class indices to human-readable names.

    labels: iterable of ints (or tensor scalars) in [0, 9].
    Returns: list of label strings, one per input index.
    """
    text_labels = ['t-shirt', 'trouser', 'pullover', "dress", "coat", "sandal", "shirt", "sneaker", "bag", "ankle boot"]
    # BUG FIX: the HTML paste dropped the list-comprehension head.
    return [text_labels[int(i)] for i in labels]

print(get_fasion_mnist_labels())

def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Display a grid of images.

    imgs: sequence of 2-D images (torch tensors or array-likes).
    num_rows, num_cols: grid layout; scale: inches per grid cell.
    titles: optional list of per-image title strings.
    Returns the flattened array of matplotlib Axes.
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if torch.is_tensor(img):
            ax.imshow(img.numpy())
        else:
            ax.imshow(img)
        # Hide tick marks/labels; only the pixels matter here.
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            # BUG FIX: use the i-th title, not the whole titles list.
            ax.set_title(titles[i])
    plt.show()
    return axes

# Grab one small batch of 18 images to eyeball the data.
X, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))
# show_images(X.reshape(18,28,28), 2, 9 ,titles=get_fasion_mnist_labels(y))

batch_size = 256  # minibatch size used by the real training loaders below

def get_dataloader_workers():
    """Number of worker processes the DataLoaders use to read data."""
    return 16

# Shuffled training loader; data reading happens in worker subprocesses.
train_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())
X1, y1 = next(iter(train_iter))
# show_images(X1.reshape(batch_size, 28, 28), 16, 16, titles=get_fasion_mnist_labels(y1))

def load_data_fashion_mnist(batch_size, resize=None):
    """Download Fashion-MNIST and return (train_iter, test_iter) DataLoaders.

    batch_size: minibatch size for both loaders.
    resize: optional size passed to transforms.Resize before ToTensor.
    """
    # BUG FIX: the paste dropped the list literal; Compose takes a transform list.
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root="./data", train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(root="./data", train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))

batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
num_inputs = 784   # 28*28 pixels, flattened
num_outputs = 10   # ten Fashion-MNIST classes
# Model parameters: small-Gaussian weights, zero bias; both track gradients.
W=torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b=torch.zeros(num_outputs, requires_grad=True)

def softmax(X):
    """Row-wise softmax: each row of X is mapped to a probability vector.

    X: (batch, classes) tensor. Returns a tensor of the same shape whose
    rows are positive and sum to 1.
    """
    # Subtract the per-row max before exponentiating for numerical
    # stability; the shift cancels in the normalization, so the result
    # is mathematically unchanged but no longer overflows for large logits.
    X_exp = torch.exp(X - X.max(dim=1, keepdim=True).values)
    partition = X_exp.sum(1, keepdim=True)
    return X_exp / partition

def net(X):
    """Flatten X to (batch, num_inputs) and apply the linear-softmax model."""
    # BUG FIX: reshape needs the input-feature count W.shape[0],
    # not the whole shape tuple.
    return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)

def cross_entropy(y_hat, y):
    """Per-example cross-entropy loss.

    y_hat: (batch, classes) predicted probabilities.
    y: (batch,) integer ground-truth labels.
    Returns: (batch,) tensor of -log p(true class).
    """
    # BUG FIX: the paste dropped the fancy-indexing that picks out the
    # probability assigned to each example's true class.
    return - torch.log(y_hat[range(len(y_hat)), y])

def accurancy(y_hat, y):
    """Return the number of correct predictions in the batch, as a float.

    y_hat: class scores (batch, classes) or already-argmaxed labels (batch,).
    y: (batch,) ground-truth labels.
    """
    # BUG FIX: compare the class-count dimension y_hat.shape[1] (the
    # bracketed index was lost in the paste).
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())

class Accumulator:
    """Accumulate n running sums (e.g. loss total, correct count, sample count)."""

    def __init__(self, n) -> None:
        # BUG FIX: the paste dropped the list literal; one float bucket per metric.
        self.data = [0.0] * n

    def add(self, *args):
        """Element-wise add the given values into the running sums."""
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        """Zero out every bucket, keeping the same length."""
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        # BUG FIX: index into the list instead of returning the whole list.
        return self.data[idx]

def evaluate_accurancy(net, data_iter):
    """Compute classification accuracy of `net` over all batches of data_iter."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch layers like dropout/batch-norm to eval mode
    metric = Accumulator(2)  # (correct predictions, total samples)
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accurancy(net(X), y), y.numel())
    # BUG FIX: correct / total (the [0] and [1] indices were lost in the paste).
    return metric[0] / metric[1]

print(evaluate_accurancy(net, test_iter))

def train_epoch_ch3(net, train_iter, loss, updater):
    """Train `net` for one epoch.

    updater: either a torch.optim.Optimizer or a callable(batch_size)
    that performs the parameter update for the scratch model.
    Returns: (mean loss per sample, training accuracy) for the epoch.
    """
    if isinstance(net, torch.nn.Module):
        net.train()
    metric = Accumulator(3)  # (loss sum, correct count, sample count)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            # BUG FIX: 'bachward' typo -> backward.
            l.mean().backward()
            updater.step()
        else:
            l.sum().backward()
            # BUG FIX: pass the batch size X.shape[0], not the shape tuple.
            updater(X.shape[0])
        metric.add(float(l.sum()), accurancy(y_hat, y), y.numel())
    # BUG FIX: mean loss = loss sum / samples; accuracy = correct / samples.
    return metric[0] / metric[2], metric[1] / metric[2]

def set_axes(axes, xlable, ylable, xlim, ylim, xscale, yscale, legend):
    """Configure labels, scales, limits, legend and grid on a matplotlib Axes."""
    axes.set_xlabel(xlable)
    axes.set_ylabel(ylable)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    # BUG FIX: draw the grid unconditionally; it was accidentally nested
    # under the legend check, so plots without a legend lost their grid.
    axes.grid()

class Animator:
    """Incrementally plot training curves (loss/accuracy) as training runs."""

    def __init__(self, xlable=None, ylable=None, legend=None, xlim=None, ylim=None,
                 xscale='linear', yscale='linear', fmts=('-', 'm--', 'g-.', 'r:'),
                 nrows=1, ncols=1, figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            # BUG FIX: the paste dropped the list literal; wrap the single
            # Axes so self.axes is always indexable.
            self.axes = [self.axes, ]
        # Capture axis configuration so it can be re-applied after each cla().
        self.config_axes = lambda: set_axes(self.axes[0], xlable, ylable, xlim, ylim,
                                            xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        """Append data point(s) (x, y) to each curve and redraw the figure."""
        if not hasattr(y, "__len__"):
            y = [y]  # BUG FIX: restore the lost list literal
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n  # one x per curve
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                # BUG FIX: append to the i-th curve, not the outer list.
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
      

def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Run the full training loop, animating train loss/acc and test acc.

    Asserts at the end sanity-check that training actually converged.
    """
    # BUG FIX: the paste dropped the xlim/ylim list literals.
    animator = Animator(xlable='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', "train acc", "test acc"])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accurancy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc, ))
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc < 1 and train_acc > 0.7, train_acc
    assert test_acc < 1 and test_acc > 0.7, test_acc

lr = 0.1  # learning rate for the scratch model

def sgd(params, lr, batch_size):
    """One minibatch-SGD step: params -= lr * grad / batch_size, then clear grads."""
    with torch.no_grad():
        for p in params:
            p.sub_(lr * p.grad / batch_size)
            p.grad.zero_()

def updater(batch_size):
    """Custom updater handed to train_epoch_ch3 for the scratch model."""
    # BUG FIX: the paste dropped the parameter list [W, b].
    return sgd([W, b], lr, batch_size)

num_epochs = 20
train_ch3(net,train_iter,test_iter, cross_entropy, num_epochs, updater)
plt.show()

</code></pre>

<p>使用torch库中的优化器、训练函数</p>

<pre>
<code>import torch
import torchvision
from torch.utils import data
from torchvision import transforms
import matplotlib.pyplot as plt
from torch import nn

def get_dataloader_workers():
    """Number of worker processes the DataLoaders use to read data."""
    return 6

def load_data_fashion_mnist(batch_size, resize=None):
    """Download Fashion-MNIST and return (train_iter, test_iter) DataLoaders.

    batch_size: minibatch size for both loaders.
    resize: optional size passed to transforms.Resize before ToTensor.
    """
    # BUG FIX: the paste dropped the list literal; Compose takes a transform list.
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root="./data", train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(root="./data", train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))

def accurancy(y_hat, y):
    """Return the number of correct predictions in the batch, as a float.

    y_hat: class scores (batch, classes) or already-argmaxed labels (batch,).
    y: (batch,) ground-truth labels.
    """
    # BUG FIX: compare the class-count dimension y_hat.shape[1] (the
    # bracketed index was lost in the paste).
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())

class Accumulator:
    """Accumulate n running sums (e.g. loss total, correct count, sample count)."""

    def __init__(self, n) -> None:
        # BUG FIX: the paste dropped the list literal; one float bucket per metric.
        self.data = [0.0] * n

    def add(self, *args):
        """Element-wise add the given values into the running sums."""
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        """Zero out every bucket, keeping the same length."""
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        # BUG FIX: index into the list instead of returning the whole list.
        return self.data[idx]

def evaluate_accurancy(net, data_iter):
    """Compute classification accuracy of `net` over all batches of data_iter."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch layers like dropout/batch-norm to eval mode
    metric = Accumulator(2)  # (correct predictions, total samples)
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accurancy(net(X), y), y.numel())
    # BUG FIX: correct / total (the [0] and [1] indices were lost in the paste).
    return metric[0] / metric[1]

def train_epoch_ch3(net, train_iter, loss, updater):
    """Train `net` for one epoch.

    updater: either a torch.optim.Optimizer or a callable(batch_size)
    that performs the parameter update for a scratch model.
    Returns: (mean loss per sample, training accuracy) for the epoch.
    """
    if isinstance(net, torch.nn.Module):
        net.train()
    metric = Accumulator(3)  # (loss sum, correct count, sample count)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            l.sum().backward()
            # BUG FIX: pass the batch size X.shape[0], not the shape tuple.
            updater(X.shape[0])
        metric.add(float(l.sum()), accurancy(y_hat, y), y.numel())
    # BUG FIX: mean loss = loss sum / samples; accuracy = correct / samples.
    return metric[0] / metric[2], metric[1] / metric[2]

def set_axes(axes, xlable, ylable, xlim, ylim, xscale, yscale, legend):
    """Configure labels, scales, limits, legend and grid on a matplotlib Axes."""
    axes.set_xlabel(xlable)
    axes.set_ylabel(ylable)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    # BUG FIX: draw the grid unconditionally; it was accidentally nested
    # under the legend check, so plots without a legend lost their grid.
    axes.grid()

class Animator:
    """Incrementally plot training curves (loss/accuracy) as training runs."""

    def __init__(self, xlable=None, ylable=None, legend=None, xlim=None, ylim=None,
                 xscale='linear', yscale='linear', fmts=('-', 'm--', 'g-.', 'r:'),
                 nrows=1, ncols=1, figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            # BUG FIX: the paste dropped the list literal; wrap the single
            # Axes so self.axes is always indexable.
            self.axes = [self.axes, ]
        # Capture axis configuration so it can be re-applied after each cla().
        self.config_axes = lambda: set_axes(self.axes[0], xlable, ylable, xlim, ylim,
                                            xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        """Append data point(s) (x, y) to each curve and redraw the figure."""
        if not hasattr(y, "__len__"):
            y = [y]  # BUG FIX: restore the lost list literal
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n  # one x per curve
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                # BUG FIX: append to the i-th curve, not the outer list.
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
      

def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Run the full training loop, animating train loss/acc and test acc.

    Asserts at the end sanity-check that training actually converged.
    """
    # BUG FIX: the paste dropped the xlim/ylim list literals.
    animator = Animator(xlable='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', "train acc", "test acc"])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accurancy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc, ))
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc < 1 and train_acc > 0.7, train_acc
    assert test_acc < 1 and test_acc > 0.7, test_acc

batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
# Concise model: flatten 28x28 images, then a single linear layer to 10 classes.
net = nn.Sequential(nn.Flatten(), nn.Linear(28*28, 10))

def init_weights(m):
    """Initializer for net.apply(): draw Linear-layer weights from N(0, 0.01)."""
    is_linear = type(m) == nn.Linear
    if is_linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights)  # initialize every Linear layer's weights
# reduction='none' keeps per-sample losses; train_epoch_ch3 reduces them itself.
loss = nn.CrossEntropyLoss(reduction='none')
trainer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 20
train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)   
plt.show()</code></pre>

<p> &nbsp;</p>

<p>训练耗用 CPU 情况</p>

<p>&nbsp; 实际损失图</p>

<p>&nbsp;</p>
</div><script>                                        var loginstr = '<div class="locked">查看本帖全部内容,请<a href="javascript:;"   style="color:#e60000" class="loginf">登录</a>或者<a href="https://bbs.eeworld.com.cn/member.php?mod=register_eeworld.php&action=wechat" style="color:#e60000" target="_blank">注册</a></div>';
                                       
                                        if(parseInt(discuz_uid)==0){
                                                                                                (function($){
                                                        var postHeight = getTextHeight(400);
                                                        $(".showpostmsg").html($(".showpostmsg").html());
                                                        $(".showpostmsg").after(loginstr);
                                                        $(".showpostmsg").css({height:postHeight,overflow:"hidden"});
                                                })(jQuery);
                                        }                </script><script type="text/javascript">(function(d,c){var a=d.createElement("script"),m=d.getElementsByTagName("script"),eewurl="//counter.eeworld.com.cn/pv/count/";a.src=eewurl+c;m.parentNode.insertBefore(a,m)})(document,523)</script>

Jacktang 发表于 2024-10-23 07:22

<p>手动实现sgd、train等过程这些代码是自己码的吧,厉害</p>
页: [1]
查看完整版本: 一起读《动手学深度学习(PyTorch版)》- 线性神经网络 - softmax两种实现对比