# Annotated gradient-descent linear-regression example, with comments for easier understanding.
import numpy as np
# Training set as a NumPy array: column 0 is the feature x, column 1 is the target y.
data = np.array([
    [80, 200],
    [90, 230],
    [104, 245],
    [112, 274],
    [125, 259],
    [135, 262],
])

# Initial slope and intercept for gradient descent.
m = 1
b = 1

# Split the independent variable (x) and dependent variable (y) out of the data set.
xarray = data[:, 0]
yreal = data[:, -1]

# Learning rate; values worth experimenting with: 0.001, 0.0001, 0.00001.
learningrate = 0.001
def grandentdecent():
    """Compute the gradient of the mean-squared error at the current (m, b).

    Reads the module-level globals m (slope), b (intercept), xarray
    (features) and yreal (targets).

    Returns:
        (bslop, mslop): the partial derivatives dMSE/db and dMSE/dm.
    """
    n = len(xarray)
    # dMSE/db = 2/n * sum(m*x + b - y)
    bslop = 0
    for index, x in enumerate(xarray):
        bslop += m * x + b - yreal[index]
    bslop = bslop * 2 / n
    # dMSE/dm = 2/n * sum((m*x + b - y) * x)
    # Bug fix: the residual must be multiplied by x here; the original code
    # computed the same sum as the intercept gradient.
    mslop = 0
    for index, x in enumerate(xarray):
        mslop += (m * x + b - yreal[index]) * x
    mslop = mslop * 2 / n
    return (bslop, mslop)
def train():
    """Run up to 9 gradient-descent steps, updating the globals m and b in place."""
    global m, b
    for _ in range(1, 10):
        # Average MSE gradients for the current slope/intercept.
        bslop, mslop = grandentdecent()
        m -= mslop * learningrate
        b -= bslop * learningrate
        # Early stop once both gradients are small enough.
        if abs(mslop) < 0.5 and abs(bslop) < 0.5:
            break
    print('m={},b={}'.format(m, b))
# Script entry point: fit the line and report the resulting slope/intercept.
if __name__ == '__main__':
    train()

