Linear Regression
## Dependencies
from sklearn.linear_model import LinearRegression
import numpy as np
## Data (Apple stock prices)
apple = np.array([155, 156, 157])
n = len(apple)
## One-liner
model = LinearRegression().fit(np.arange(n).reshape((n,1)), apple)
## Result & puzzle
print(model.predict([[3],[4]]))
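A quick way to sanity-check the fit (a minimal sketch, not from the book): the model's coefficients reproduce the predictions by hand.
## Inspect the fitted line: price = slope * day + intercept
print(model.coef_, model.intercept_)
print(model.coef_[0] * 3 + model.intercept_)  # reproduces model.predict([[3]])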
Logistic Regression
## Dependencies
from sklearn.linear_model import LogisticRegression
import numpy as np
## Data (#cigarettes, cancer)
X = np.array([[0, "No"],
              [10, "No"],
              [60, "Yes"],
              [90, "Yes"]])
## One-liner
# The mixed array stores everything as strings; cast the feature column to float,
# and let reshape(-1, 1) infer the row count instead of relying on a stale n
model = LogisticRegression().fit(X[:,0].astype(float).reshape(-1,1), X[:,1])
## Result & puzzle
print(model.predict([[2],[12],[13],[40],[90]]))
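Beyond hard labels, the classifier also exposes class probabilities via predict_proba; a minimal sketch, not part of the book's example:
## Class probabilities; columns are ordered as in model.classes_
print(model.classes_)
print(model.predict_proba([[2], [12], [13], [40], [90]]))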
K-Means Clustering
## Dependencies
from sklearn.cluster import KMeans
import numpy as np
## Data (Work (h) / Salary ($))
X = np.array([[35, 7000], [45, 6900], [70, 7100],
              [20, 2000], [25, 2200], [15, 1800]])
## One-liner
kmeans = KMeans(n_clusters=2).fit(X)
## Result & puzzle
cc = kmeans.cluster_centers_
print(cc)
print(kmeans.predict([[10, 1000], [20, 5000]]))
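One caveat: k-means relies on Euclidean distance, so the salary column (thousands) dominates the hours column (tens). A minimal sketch with standardized features; the StandardScaler step is my addition, not part of the book's example:
## Cluster on standardized features so both columns carry comparable weight
from sklearn.preprocessing import StandardScaler
X_scaled = StandardScaler().fit_transform(X)
print(KMeans(n_clusters=2).fit(X_scaled).labels_)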
K-Nearest Neighbors (KNN)
## Dependencies
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
## Data (House Size (square meters) / House Price ($))
X = np.array([[35, 30000], [45, 45000], [40, 50000],
              [35, 35000], [25, 32500], [40, 40000]])
## One-liner
KNN = KNeighborsRegressor(n_neighbors=3).fit(X[:,0].reshape(-1,1), X[:,1])
## Result & puzzle
res = KNN.predict([[30]])
print(res)
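To see which training points the prediction averages over, the regressor's kneighbors method returns the nearest neighbors; a minimal sketch, not from the book:
## The 3 nearest house sizes and their prices; the prediction is their mean
distances, indices = KNN.kneighbors([[30]])
print(X[indices[0], 1])
print(X[indices[0], 1].mean())  # matches KNN.predict([[30]])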
Neural Network Analysis
## Dependencies
from sklearn.neural_network import MLPRegressor
import numpy as np
## Questionnaire data (WEEK, YEARS, BOOKS, PROJECTS, EARN, RATING)
X = np.array(
    [[20, 11, 20, 30, 4000, 3000],
     [12, 4, 0, 0, 1000, 1500],
     [2, 0, 1, 10, 0, 1400],
     [35, 5, 10, 70, 6000, 3800],
     [30, 1, 4, 65, 0, 3900],
     [35, 1, 0, 0, 0, 100],
     [15, 1, 2, 25, 0, 3700],
     [40, 3, -1, 60, 1000, 2000],
     [40, 1, 2, 95, 0, 1000],
     [10, 0, 0, 0, 0, 1400],
     [30, 1, 0, 50, 0, 1700],
     [1, 0, 0, 45, 0, 1762],
     [10, 32, 10, 5, 0, 2400],
     [5, 35, 4, 0, 13000, 3900],
     [8, 9, 40, 30, 1000, 2625],
     [1, 0, 1, 0, 0, 1900],
     [1, 30, 10, 0, 1000, 1900],
     [7, 16, 5, 0, 0, 3000]])
## One-liner
neural_net = MLPRegressor(max_iter=10000).fit(X[:,:-1], X[:,-1])
## Result
res = neural_net.predict([[0, 0, 0, 0, 0]])
print(res)
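MLPRegressor is sensitive to feature scale, so standardizing the inputs typically helps training converge. A minimal sketch; the StandardScaler step is my addition, not part of the book's example:
## Standardize the five input features before fitting the network
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X[:,:-1])
net = MLPRegressor(max_iter=10000).fit(scaler.transform(X[:,:-1]), X[:,-1])
print(net.predict(scaler.transform([[0, 0, 0, 0, 0]])))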
Decision Tree Learning
## Dependencies
from sklearn import tree
import numpy as np
## Data: student scores in (math, language, creativity) --> study field
X = np.array([[9, 5, 6, "computer science"],
              [1, 8, 1, "linguistics"],
              [5, 7, 9, "art"]])
## One-liner
# Cast the string-typed score columns back to float before fitting
Tree = tree.DecisionTreeClassifier().fit(X[:,:-1].astype(float), X[:,-1])
## Result & puzzle
student_0 = Tree.predict([[8, 6, 5]])
print(student_0)
student_1 = Tree.predict([[3, 7, 9]])
print(student_1)
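The learned rules can also be printed, which makes the tiny tree easy to audit; a minimal sketch using scikit-learn's export_text (available since version 0.21):
## Print the tree's decision rules as plain text
from sklearn.tree import export_text
print(export_text(Tree, feature_names=["math", "language", "creativity"]))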
Finding the Data Row with Minimum Variance
## Dependencies
import numpy as np
## Data (rows: stocks / cols: stock prices)
X = np.array([[25, 27, 29, 30],
              [1, 5, 3, 2],
              [12, 11, 8, 3],
              [1, 1, 2, 2],
              [2, 6, 2, 2]])
## One-liner
# Find the stock with smallest variance
min_row = min([(i,np.var(X[i,:])) for i in range(len(X))], key=lambda x: x[1])
## Result & puzzle
print("Row with minimum variance: " + str(min_row[0]))
print("Variance: " + str(min_row[1]))
Basic Statistics
## Dependencies
import numpy as np
## Stock Price Data: 5 companies
# (row=[price_day_1, price_day_2, ...])
x = np.array([[8, 9, 11, 12],
              [1, 2, 2, 1],
              [2, 8, 9, 9],
              [9, 6, 6, 3],
              [3, 3, 3, 3]])
## One-liner
avg, var, std = np.average(x, axis=1), np.var(x, axis=1), np.std(x, axis=1)
## Result & puzzle
print("Averages: " + str(avg))
print("Variances: " + str(var))
print("Standard Deviations: " + str(std))
Support Vector Machine Classification
## Dependencies
from sklearn import svm
import numpy as np
## Data: student scores in (math, language, creativity) --> study field
X = np.array([[9, 5, 6, "computer science"],
              [10, 1, 2, "computer science"],
              [1, 8, 1, "literature"],
              [4, 9, 3, "literature"],
              [0, 1, 10, "art"],
              [5, 7, 9, "art"]])
## One-liner
# Avoid shadowing the imported svm module, and cast the string-typed score columns to float
model = svm.SVC().fit(X[:,:-1].astype(float), X[:,-1])
## Result & puzzle
student_0 = model.predict([[3, 3, 6]])
print(student_0)
student_1 = model.predict([[8, 1, 1]])
print(student_1)
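SVC defaults to the RBF kernel; a linear kernel is a common alternative for small, roughly linearly separable data. A minimal sketch, not part of the book's example:
## Same data, linear kernel
model_linear = svm.SVC(kernel="linear").fit(X[:,:-1].astype(float), X[:,-1])
print(model_linear.predict([[3, 3, 6]]))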
Random Forest Classification
## Dependencies
import numpy as np
from sklearn.ensemble import RandomForestClassifier
## Data: student scores in (math, language, creativity) --> study field
X = np.array([[9, 5, 6, "computer science"],
              [5, 1, 5, "computer science"],
              [8, 8, 8, "computer science"],
              [1, 10, 7, "literature"],
              [1, 8, 1, "literature"],
              [5, 7, 9, "art"],
              [1, 1, 6, "art"]])
## One-liner
# Cast the string-typed score columns back to float before fitting
Forest = RandomForestClassifier(n_estimators=10).fit(X[:,:-1].astype(float), X[:,-1])
## Result
students = Forest.predict([[8, 6, 5],
                           [3, 7, 9],
                           [2, 2, 1]])
print(students)
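The trained forest also reports impurity-based feature importances, a quick hint at which score drives the classification; a minimal sketch, not from the book:
## Importance of (math, language, creativity); the values sum to 1
print(Forest.feature_importances_)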
Reference: Mayer, Christian. Python One-Liners: Write Concise, Eloquent Python Like a Professional. Chinese edition (Python一行流:像专家一样写代码), Publishing House of Electronics Industry, 2021.