from sklearn import datasets
iris = datasets.load_iris()
# print(iris.DESCR)
import pandas as pd
import numpy as np
# load into pandas DataFrames and merge the features and targets
feature = pd.DataFrame(iris.data, columns=iris.feature_names)
target = pd.DataFrame(iris.target, columns=['target'])
iris_data = pd.concat([feature, target], axis=1)
# keep only sepal length in cm, sepal width in cm and target
iris_data = iris_data[['sepal length (cm)', 'sepal width (cm)', 'target']]
# keep only Iris-Setosa and Iris-Versicolour classes
iris_data = iris_data[iris_data.target <= 1]
# print(iris_data.head(5))
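# Sanity check (optional): the filtered frame should hold 100 rows, 50 per
# class, with target 0 = setosa and 1 = versicolor
# print(iris_data['target'].value_counts())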
# train_test_split from sklearn's model_selection splits the data into two
# groups: train and test
from sklearn.model_selection import train_test_split
train_feature, test_feature, train_target, test_target = train_test_split(
    iris_data[['sepal length (cm)', 'sepal width (cm)']], iris_data[['target']], test_size=0.3, random_state=4
)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
train_feature = sc.fit_transform(train_feature)
test_feature = sc.transform(test_feature)  # reuse the statistics fitted on the training set; refitting on test data would leak information
train_target = np.array(train_target)
test_target = np.array(test_target)
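# Quick check (optional): after StandardScaler, each training column should
# have roughly zero mean and unit variance
# print(train_feature.mean(axis=0), train_feature.std(axis=0))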
# print(train_feature, test_feature)
# 1) model
# f = wx + b, sigmoid at the end
class LogisticRegression():
    def __init__(self):
        super(LogisticRegression, self).__init__()

    def linear(self, x, w, b):
        return np.dot(x, w) + b

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def forward(self, x, w, b):
        y_pred = self.sigmoid(self.linear(x, w, b)).reshape(-1, 1)
        return y_pred
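# In equation form (matching the comment above): y_hat = sigmoid(x @ w + b),
# where sigmoid(z) = 1 / (1 + exp(-z)) squashes the linear output into (0, 1),
# so y_hat can be read as the predicted probability of class 1.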
model = LogisticRegression()
# 2) loss and optimizer
learning_rate = 0.01
# CrossEntropy
class BinaryCrossEntropy():
    def __init__(self):
        super(BinaryCrossEntropy, self).__init__()

    def cross_entropy(self, y_pred, target):
        x = target * np.log(y_pred) + (1 - target) * np.log(1 - y_pred)
        return -(np.mean(x))

    def forward(self, y_pred, target):
        return self.cross_entropy(y_pred, target)
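# The loss above is L = -mean(y * log(y_hat) + (1 - y) * log(1 - y_hat)).
# One hardening step worth adding (not in the original code): clip the
# predictions so np.log never receives exactly 0 or 1, e.g.
#   y_pred = np.clip(y_pred, 1e-7, 1 - 1e-7)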
# GradientDescent
class GradientDescent():
    def __init__(self, lr=0.1):
        super(GradientDescent, self).__init__()
        self.lr = lr

    def forward(self, w, b, y_pred, target, data):
        w = w - self.lr * np.mean(data * (y_pred - target), axis=0)
        b = b - self.lr * np.mean((y_pred - target), axis=0)
        return w, b
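# Why this update works: for sigmoid + binary cross-entropy the gradients
# simplify to dL/dw = mean(x * (y_hat - y)) and dL/db = mean(y_hat - y),
# which is exactly what forward() applies with step size lr.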
criterion = BinaryCrossEntropy()
optimizer = GradientDescent(lr=learning_rate)
# 3) training loop
w = np.array([0, 0])  # one weight per input feature, initialized to zero
b = np.array([0])     # bias term, initialized to zero
num_epochs = 100
for epoch in range(num_epochs):
    for i, data in enumerate(train_feature):
        # forward pass and loss
        y_pred = model.forward(data, w, b)
        loss = criterion.forward(y_pred, train_target[i])
        # update
        w, b = optimizer.forward(w, b, y_pred, train_target[i], data)
    if (epoch + 1) % 10 == 0:
        print(f'epoch {epoch + 1}: loss = {loss}')
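# The loop above updates once per sample (SGD). A full-batch variant
# (a minimal sketch; same update rule, vectorized over all training rows):
#   for epoch in range(num_epochs):
#       y_pred = model.forward(train_feature, w, b)
#       w, b = optimizer.forward(w, b, y_pred, train_target, train_feature)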
# checking testing accuracy
y_pred = model.forward(test_feature, w, b)
y_pred_cls = y_pred.round()
acc = np.equal(y_pred_cls, test_target).sum() / float(test_target.shape[0])
print(f'accuracy = {acc: .4f}')
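# Cross-check (optional sketch, assuming scikit-learn as imported above):
# sklearn's built-in logistic regression should reach a similar accuracy
# on this 2-feature, 2-class subset.
# from sklearn.linear_model import LogisticRegression as SkLogisticRegression
# clf = SkLogisticRegression().fit(train_feature, train_target.ravel())
# print(f'sklearn accuracy = {clf.score(test_feature, test_target.ravel()):.4f}')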