# 生成数据 — generate the synthetic dataset
import torch
def synthetic_data(w, b, num_examples):
    """Generate ``num_examples`` points of y = Xw + b plus Gaussian noise.

    Features are drawn from N(0, 1); targets receive N(0, 0.1) noise and
    are returned as a column vector of shape (num_examples, 1).
    """
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = X @ w + b
    noise = torch.normal(0, 0.1, y.shape)
    return X, (y + noise).reshape((-1, 1))
# Ground-truth parameters used to synthesize the training data.
true_w = torch.tensor([[2.0], [-3.4]])  # column vector, same as reshape(2, 1)
true_b = 4.2
features, label = synthetic_data(true_w, true_b, 500)
print(f'features的值 {features[0]} ,label的值 {label[0]} ')
# 可视化 — visualize the data
import matplotlib. pyplot as plt
# 3D view: both features plotted against the label.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(features[:, 0], features[:, 1], label)

# 2D views on their own figure. Previously plt.scatter was called while the
# 3D axes were current, so the 2D points were drawn onto the z=0 plane of
# the 3D plot instead of a flat chart.
fig2d, (ax0, ax1) = plt.subplots(1, 2)
ax0.scatter(features[:, 0], label)
ax1.scatter(features[:, 1], label)
plt.show()  # without this a plain script never displays the figures
# 读取数据集 — read the dataset in minibatches
import random
def data_iter(batch_size, features, label):
    """Yield shuffled minibatches of (features, label).

    The final batch may be smaller when the dataset size is not a
    multiple of ``batch_size``.
    """
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit examples in a fresh random order
    for start in range(0, num_examples, batch_size):
        stop = min(start + batch_size, num_examples)
        batch = torch.tensor(indices[start:stop])
        yield features[batch], label[batch]
# Peek at a single minibatch to sanity-check the iterator.
for X_batch, y_batch in data_iter(10, features, label):
    print(X_batch, y_batch)
    break
# 搭建模型 — define the model
def linreg(x, w, b):
    """The linear regression model: x @ w + b."""
    return x @ w + b
# 损失函数 — loss function
def squared_loss(hat_y, y):
    """Halved squared error, elementwise; y is reshaped to match hat_y."""
    diff = hat_y - y.reshape(hat_y.shape)
    return diff * diff / 2
# 随机梯度下降 — minibatch stochastic gradient descent
def sgd(params, lr, batch_size):
    """One SGD step: update each parameter in place, then clear its grad.

    Gradients are assumed to hold the *sum* of per-example gradients,
    hence the division by ``batch_size``.
    """
    with torch.no_grad():
        for p in params:
            p -= lr * p.grad / batch_size
            p.grad.zero_()
# 训练 — training loop (from-scratch implementation)
# Initialize learnable parameters: small Gaussian weights, scalar bias.
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.tensor(1.0, requires_grad=True)
lr = 0.03
num_epochs = 5
batch_size = 5
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, label):
        l = loss(net(x, w, b), y)
        # l has shape (batch, 1); summing before backward accumulates the
        # per-example gradients, which sgd() then divides by batch_size.
        l.sum().backward()
        sgd([w, b], lr, batch_size)
    with torch.no_grad():
        train_l = loss(net(features, w, b), label)
        print(f'第 {epoch + 1} 轮损失为 {train_l.mean()} ')
# The original line computed (w - true_w, b - true_b) and discarded the
# tuple (a notebook-display idiom that is a no-op in a script) — print it.
print(f'w error: {(w - true_w).reshape(-1).tolist()}, b error: {float(b - true_b)}')
# 读取数据 — data loading (concise implementation)
from torch. utils import data
def data_iter(data_array, batch_size, is_train=True):
    """Wrap the given tensors in a DataLoader; shuffle only when training."""
    dataset = data.TensorDataset(*data_array)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
from torch import nn
# A single fully-connected layer: 2 input features -> 1 output.
net = nn.Sequential(nn.Linear(2, 1))
# Initialize the layer: small Gaussian weights, zero bias.
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
# 损失函数 — loss function
loss = nn.MSELoss()  # mean squared error, averaged over the batch by default
# 优化器 — optimizer
trainer = torch.optim.SGD(net.parameters(), lr=0.03)  # plain minibatch SGD
# 训练 — training loop (concise implementation)
epochs_num = 5
for epoch in range(epochs_num):
    for x, y in data_iter((features, label), batch_size):
        l = loss(net(x), y)
        trainer.zero_grad()  # clear gradients left over from the last step
        l.backward()
        trainer.step()
    # Evaluate on the full dataset without tracking gradients (matches the
    # from-scratch loop above; the original tracked grads needlessly here).
    with torch.no_grad():
        l = loss(net(features), label)
    print(f'第 {epoch + 1} 轮损失为 {l}')
# The original last line computed the parameter errors and discarded the
# tuple (a notebook-display idiom that is a no-op in a script) — print it.
w_err = (net[0].weight.data.reshape(true_w.shape) - true_w).reshape(-1).tolist()
print(f'w error: {w_err}, b error: {float(net[0].bias.data - true_b)}')