Logistic Regression
Objective:
Understand the logistic regression model and master its parameter estimation algorithms.
Requirements:
Implement parameter estimation for two loss functions (1. without a penalty term; 2. with a penalty on the parameters), using gradient descent, conjugate gradient, Newton's method, or a similar optimizer.
Verification:
1. Generate two classes of data by hand (e.g., from Gaussian distributions) to verify your algorithm. Examine what happens when the class-conditional distributions do not satisfy the naive Bayes assumption.
2. Logistic regression is widely used, for example in ad click prediction. Find a real dataset on the UCI website and test on it.
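For reference, the loss minimized by the code below is (with the 1/N averaging used in the implementation) the negative log-likelihood of the logistic model plus an optional L2 penalty. A sketch of the formulas, assuming labels $y_i \in \{0,1\}$ and augmented feature vectors $x_i$ with a leading 1:

$$L(w) = \frac{1}{N}\left[\frac{\lambda}{2}\, w^{\top} w + \sum_{i=1}^{N}\Bigl(-y_i\, w^{\top} x_i + \ln\bigl(1 + e^{w^{\top} x_i}\bigr)\Bigr)\right]$$

$$\nabla_w L(w) = \frac{1}{N}\left[\lambda w + \sum_{i=1}^{N}\bigl(\sigma(w^{\top} x_i) - y_i\bigr)\, x_i\right], \qquad \sigma(t) = \frac{1}{1 + e^{-t}}$$

Setting $\lambda = 0$ gives the unpenalized case.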
Handling my synthetic data:
import numpy as np
from matplotlib import pyplot as plt
import math
def two_dimension_generator(n, positive_rate, train_data_rate, precision=0.003, loc=1, scale=0.4, k=2):
    '''
    Generate 2-D normally distributed training and test data together with their labels.
    :param n: total number of positive and negative samples
    :param positive_rate: fraction of the samples that should be positive
    :param train_data_rate: fraction of the samples used for training
    :param precision: step size used when searching for the separating line
    :param loc, scale: mean and standard deviation of the normal distribution
    :param k: slope of the separating line y = kx + b
    :return: training data, test data, and the separating line
    '''
    print("two dimension data generating...")
    x = np.mat(np.random.normal(loc=loc, scale=scale, size=n)).transpose()
    y = np.mat(np.random.normal(loc=loc, scale=scale, size=n)).transpose()
    b = y.max() - k * x.min()  # the b in y = kx + b
    # Search for the intercept b so that the requested fraction of samples lies above the line.
    for i in range(99999999999):
        n_positive = 0
        for j in range(n):  # be careful which loop variable belongs to which loop
            if y[j] - k * x[j] - b >= 0:
                n_positive = n_positive + 1
        if n_positive / n >= positive_rate:
            break
        else:
            b = b - precision
    # Split the samples into training and test sets by the given ratio.
    x_train = x[0:int(train_data_rate * n)]  # slicing
    y_train = y[0:int(train_data_rate * n)]
    x_test = x[int(train_data_rate * n):n]
    y_test = y[int(train_data_rate * n):n]
    x_train_positive = list()
    y_train_positive = list()
    x_train_negtive = list()
    y_train_negtive = list()
    x_test_positive = list()
    y_test_positive = list()
    x_test_negtive = list()
    y_test_negtive = list()
    for i in range(int(train_data_rate * n)):
        if y[i] - k * x[i] - b >= 0:
            x_train_positive.append(np.sum(x[i][0]))
            y_train_positive.append(np.sum(y[i][0]))
        else:
            x_train_negtive.append(np.sum(x[i][0]))
            y_train_negtive.append(np.sum(y[i][0]))
    for i in range(int(train_data_rate * n), n):  # range(start, stop) form
        if y[i] - k * x[i] - b >= 0:
            x_test_positive.append(np.sum(x[i][0]))
            y_test_positive.append(np.sum(y[i][0]))
        else:
            x_test_negtive.append(np.sum(x[i][0]))
            y_test_negtive.append(np.sum(y[i][0]))
    return [np.mat(x_train_positive).transpose(), np.mat(y_train_positive).transpose(),
            np.mat(x_train_negtive).transpose(), np.mat(y_train_negtive).transpose()], \
           [np.mat(x_test_positive).transpose(), np.mat(y_test_positive).transpose(),
            np.mat(x_test_negtive).transpose(), np.mat(y_test_negtive).transpose()], \
           [x.max(), k * x.max() + b, x.min(), k * x.min() + b]
train_data, test_data, line_data = two_dimension_generator(200, 0.4, 0.5)
plt.subplot(221)
# Column vectors (one column per matrix) are required for plotting to work correctly.
plt.plot(train_data[0], train_data[1], "go")
plt.plot(train_data[2], train_data[3], "mo")
plt.plot([line_data[0], line_data[2]], [line_data[1], line_data[3]])  # first list holds the x coordinates, second list the y coordinates
plt.title("data made by the line")
plt.xlabel('x')
plt.ylabel('y')
# Assemble the training data.
temp1 = np.r_[train_data[0], train_data[2]]  # stack matrices vertically
temp2 = np.r_[train_data[1], train_data[3]]
X_train = np.c_[temp1, temp2]  # stack matrices horizontally; X_train has 2 columns
X_temp = np.c_[np.mat(np.ones(X_train.shape[0])).transpose(), X_train]
X_train = X_temp
Y_train = np.mat(np.linspace(0, 0, X_train.shape[0])).transpose()  # one column of 0/1 labels
for i in range(train_data[0].size):
    Y_train[i, 0] = 1
# Assemble the test data.
temp1 = np.r_[test_data[0], test_data[2]]  # stack matrices vertically
temp2 = np.r_[test_data[1], test_data[3]]
X_test = np.c_[temp1, temp2]  # stack matrices horizontally; X_test has 2 columns
X_temp = np.c_[np.mat(np.ones(X_test.shape[0])).transpose(), X_test]
X_test = X_temp
Y_test = np.mat(np.linspace(0, 0, X_test.shape[0])).transpose()  # one column of 0/1 labels
for i in range(test_data[0].size):
    Y_test[i, 0] = 1
def partial_derivative(X, Y, w, lamda=0.0):
    '''
    Compute the partial derivative of the loss with respect to w from X and Y.
    :param X: the X in the derivative formula
    :param Y: the Y in the derivative formula
    :param w: the w in the derivative formula
    :param lamda: coefficient of the L2 penalty term (0 means no penalty)
    :return: the derivative divided by the number of samples, which keeps the values from overflowing
    '''
    pd_ending = lamda * w  # gradient of the penalty term lamda/2 * w^T w
    for pd_i in range(X.shape[0]):
        pd_temp1 = math.exp(np.sum(np.dot(w.transpose(), X[pd_i].transpose())))
        pd_ending = pd_ending - X[pd_i].transpose() * (np.sum(Y[pd_i][0]) - pd_temp1 / (1 + pd_temp1))
    return pd_ending / X.shape[0]
def loss(X, Y, w, lamda=0.1):
    '''
    Compute the loss that gradient descent minimizes.
    :param X: the X in the loss formula
    :param Y: the Y in the loss formula
    :param w: the w in the loss formula
    :param lamda: coefficient of the penalty (regularization) term, used to prevent overfitting
    :return: the loss divided by the number of samples, which keeps the values from overflowing
    '''
    loss_ending = lamda * np.sum(np.dot(w.transpose(), w)) / 2
    for loss_i in range(X.shape[0]):
        loss_temp = np.sum(np.dot(w.transpose(), X[loss_i].transpose()))  # wTx
        loss_ending = loss_ending - np.sum(Y[loss_i][0]) * loss_temp + math.log(1 + math.exp(loss_temp))  # math.log uses base e by default
    return loss_ending / X.shape[0]
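The docstrings above note that the loss and the gradient are divided by the number of samples, but `math.exp(wTx)` can still overflow when wTx is large. A minimal vectorized sketch of the same loss using `np.logaddexp`, which evaluates log(1 + e^t) stably; it assumes plain numpy arrays instead of `np.mat` and is only an illustration, not part of the original script:

import numpy as np

def loss_stable(X, Y, w, lamda=0.1):
    # X: (N, d) array with a leading column of ones, Y: (N,) array of 0/1 labels, w: (d,) weights
    z = X @ w                                    # w^T x_i for every sample
    nll = np.sum(np.logaddexp(0.0, z) - Y * z)   # log(1 + e^z) - y * z, summed over samples
    penalty = lamda * np.dot(w, w) / 2           # L2 penalty term
    return (nll + penalty) / X.shape[0]          # same 1/N averaging as above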
def gradient_decent(X, Y, lr, precision=0.0000001, lamda=0.3):
    '''
    Gradient descent. All non-scalar arguments are numpy matrices, for example:
    :param X: [[ 1.  1.18111171  1.19855526]
               [ 1.  0.69218864  0.14772317]
               [ 1.  0.87462883  0.75794103]
               [ 1.  1.24048911  0.95684246]
               [ 1.  1.06937979  0.29508903]
               [ 1.  1.14704526  0.87367971]]  X is the augmented matrix (a column of ones prepended)
    :param Y: e.g. [[1.]
                    [1.]
                    [0.]
                    [0.]
                    [0.]]
    :param lr: learning rate (step size)
    :param precision: stopping tolerance on the change in loss between iterations
    :param lamda: penalty coefficient (a hyperparameter) used to prevent overfitting
    :return: the weight vector found under the given settings
    '''
    print("gradient descent calculating...")
    gd_w = np.mat(np.zeros(X.shape[1])).transpose()
    gd_loss = loss(X, Y, gd_w, lamda)
    for gd_i in range(999999999):
        gd_w = gd_w - lr * partial_derivative(X, Y, gd_w, lamda)
        gd_temp_loss = loss(X, Y, gd_w, lamda)
        print(math.fabs(gd_loss - gd_temp_loss))  # progress: change in loss per iteration
        if math.fabs(gd_loss - gd_temp_loss) < precision:
            break
        else:
            gd_loss = gd_temp_loss
    return gd_w
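The assignment also allows Newton's method. A minimal sketch of Newton's method (iteratively reweighted least squares) for the same averaged, L2-penalized objective, assuming plain numpy arrays (X of shape (N, d) with a leading column of ones, y of shape (N,)); this is an illustrative alternative, not the script's own optimizer:

import numpy as np

def newton_logistic(X, y, lamda=0.3, n_iter=20, tol=1e-8):
    # Newton / IRLS update: w <- w - H^{-1} g, where
    #   g = (X^T (p - y) + lamda * w) / N                (gradient)
    #   H = (X^T diag(p * (1 - p)) X + lamda * I) / N    (Hessian)
    N, d = X.shape
    w = np.zeros(d)
    for _ in range(n_iter):
        p = 1.0 / (1.0 + np.exp(-(X @ w)))           # predicted probabilities
        g = (X.T @ (p - y) + lamda * w) / N
        r = p * (1.0 - p)                            # diagonal of the reweighting matrix
        H = (X.T @ (X * r[:, None]) + lamda * np.eye(d)) / N
        step = np.linalg.solve(H, g)                 # Newton step
        w = w - step
        if np.linalg.norm(step) < tol:
            break
    return w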
def accuracy(X, Y, k):
    '''
    Compute the accuracy of the predicted labels against the true labels.
    :param X: data matrix X
    :param Y: label matrix
    :param k: coefficient (weight) vector
    :return: accuracy
    '''
    ac_temp_1 = 0
    for ac_i in range(X.shape[0]):  # use distinct loop-variable names in different functions, e.g. ac_i
        ac_temp = np.sum(np.dot(k.transpose(), X[ac_i].transpose()))
        if ac_temp >= 0 and np.sum(Y[ac_i][0]) == 1:
            ac_temp_1 = ac_temp_1 + 1
        if ac_temp < 0 and np.sum(Y[ac_i][0]) == 0:
            ac_temp_1 = ac_temp_1 + 1
    return ac_temp_1 / X.shape[0]
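The decision rule w^T x >= 0 used above is equivalent to predicting class 1 whenever the model probability sigma(w^T x) >= 0.5. A small sketch (not in the original script, assuming numpy-array inputs) that makes the equivalence explicit:

import numpy as np

def predict_proba(X, w):
    # P(y = 1 | x) = sigma(w^T x) for every row of the augmented matrix X
    return 1.0 / (1.0 + np.exp(-(X @ w)))

def predict_label(X, w):
    # sigma(t) >= 0.5 exactly when t >= 0, so this matches the w^T x >= 0 rule above
    return (X @ w >= 0).astype(int)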
k_1 = gradient_decent(X_train, Y_train, 0.1)
plt.subplot(222)
# Column vectors (one column per matrix) are required for plotting to work correctly.
plt.plot(train_data[0], train_data[1], "go")
plt.plot(train_data[2], train_data[3], "mo")
plt.plot([line_data[0], line_data[2]], [line_data[1], line_data[3]])
# Learned decision boundary: w0 + w1*x + w2*y = 0  =>  y = -(w0 + w1*x) / w2
plt.plot([line_data[0], line_data[2]],
         [-(np.sum(k_1[0]) + np.sum(k_1[1]) * line_data[0]) / np.sum(k_1[2]),
          -(np.sum(k_1[0]) + np.sum(k_1[1]) * line_data[2]) / np.sum(k_1[2])])
plt.title("train data classify")
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(223)
plt.plot(test_data[0], test_data[1], "go")
plt.plot(test_data[2], test_data[3], "mo")
plt.plot([line_data[0], line_data[2]], [line_data[1], line_data[3]])
plt.plot([line_data[0], line_data[2]],
         [-(np.sum(k_1[0]) + np.sum(k_1[1]) * line_data[0]) / np.sum(k_1[2]),
          -(np.sum(k_1[0]) + np.sum(k_1[1]) * line_data[2]) / np.sum(k_1[2])])
plt.title("test data classify")
plt.xlabel('x')
plt.ylabel('y')
k_2 = gradient_decent(X_train, Y_train, 0.1, precision=0.00001, lamda=0.0)
plt.subplot(224)
plt.plot(test_data[0], test_data[1], "go")
plt.plot(test_data[2], test_data[3], "mo")
plt.plot([line_data[0], line_data[2]], [line_data[1], line_data[3]])
plt.plot([line_data[0], line_data[2]],
         [-(np.sum(k_2[0]) + np.sum(k_2[1]) * line_data[0]) / np.sum(k_2[2]),
          -(np.sum(k_2[0]) + np.sum(k_2[1]) * line_data[2]) / np.sum(k_2[2])])
plt.plot([line_data[0], line_data[2]],
         [-(np.sum(k_1[0]) + np.sum(k_1[1]) * line_data[0]) / np.sum(k_1[2]),
          -(np.sum(k_1[0]) + np.sum(k_1[1]) * line_data[2]) / np.sum(k_1[2])])
plt.title("test data classify, no regularization")
plt.xlabel('x')
plt.ylabel('y')
print("accuracy for test data with regularization: ")
print(accuracy(X_test, Y_test, k_1))
print("accuracy for test data without regularization: ")
print(accuracy(X_test, Y_test, k_2))
plt.show()
Handling the UCI data:
import numpy as np
import math
def partial_derivative(X, Y, w, lamda=0.0):
    '''
    Compute the partial derivative of the loss with respect to w from X and Y.
    :param X: the X in the derivative formula
    :param Y: the Y in the derivative formula
    :param w: the w in the derivative formula
    :param lamda: coefficient of the L2 penalty term (0 means no penalty)
    :return: the derivative divided by the number of samples, which keeps the values from overflowing
    '''
    pd_ending = lamda * w  # gradient of the penalty term lamda/2 * w^T w
    for pd_i in range(X.shape[0]):
        pd_temp1 = math.exp(np.sum(np.dot(w.transpose(), X[pd_i].transpose())))
        pd_ending = pd_ending - X[pd_i].transpose() * (np.sum(Y[pd_i][0]) - pd_temp1 / (1 + pd_temp1))
    return pd_ending / X.shape[0]
def loss(X, Y, w, lamda=0.1):
    '''
    Compute the loss that gradient descent minimizes.
    :param X: the X in the loss formula
    :param Y: the Y in the loss formula
    :param w: the w in the loss formula
    :param lamda: coefficient of the penalty (regularization) term, used to prevent overfitting
    :return: the loss divided by the number of samples, which keeps the values from overflowing
    '''
    loss_ending = lamda * np.sum(np.dot(w.transpose(), w)) / 2
    for loss_i in range(X.shape[0]):
        loss_temp = np.sum(np.dot(w.transpose(), X[loss_i].transpose()))  # wTx
        loss_ending = loss_ending - np.sum(Y[loss_i][0]) * loss_temp + math.log(1 + math.exp(loss_temp))  # math.log uses base e by default
    return loss_ending / X.shape[0]
def gradient_decent(X, Y, lr, precision=0.000005, lamda=0.3):
    '''
    Gradient descent. All non-scalar arguments are numpy matrices, for example:
    :param X: [[ 1.  1.18111171  1.19855526]
               [ 1.  0.69218864  0.14772317]
               [ 1.  0.87462883  0.75794103]
               [ 1.  1.24048911  0.95684246]
               [ 1.  1.06937979  0.29508903]
               [ 1.  1.14704526  0.87367971]]  X is the augmented matrix (a column of ones prepended)
    :param Y: e.g. [[1.]
                    [1.]
                    [0.]
                    [0.]
                    [0.]]
    :param lr: learning rate (step size)
    :param precision: stopping tolerance on the change in loss between iterations
    :param lamda: penalty coefficient (a hyperparameter) used to prevent overfitting
    :return: the weight vector found under the given settings
    '''
    print("gradient descent calculating...")
    gd_w = np.mat(np.zeros(X.shape[1])).transpose()
    gd_loss = loss(X, Y, gd_w, lamda)
    for gd_i in range(999999999):
        gd_w = gd_w - lr * partial_derivative(X, Y, gd_w, lamda)
        gd_temp_loss = loss(X, Y, gd_w, lamda)
        print(math.fabs(gd_loss - gd_temp_loss))  # progress: change in loss per iteration
        if math.fabs(gd_loss - gd_temp_loss) < precision:
            break
        else:
            if gd_temp_loss > gd_loss:
                lr = lr * 0.5  # learning-rate decay: halve the step when the loss increases, which speeds up and stabilizes the fit in practice
            gd_loss = gd_temp_loss
    return gd_w
def accuracy(X, Y, k):
    '''
    Compute the accuracy of the predicted labels against the true labels.
    :param X: data matrix X
    :param Y: label matrix
    :param k: coefficient (weight) vector
    :return: accuracy
    '''
    ac_temp_1 = 0
    for ac_i in range(X.shape[0]):  # use distinct loop-variable names in different functions, e.g. ac_i
        ac_temp = np.sum(np.dot(k.transpose(), X[ac_i].transpose()))
        if ac_temp >= 0 and np.sum(Y[ac_i][0]) == 1:
            ac_temp_1 = ac_temp_1 + 1
        if ac_temp < 0 and np.sum(Y[ac_i][0]) == 0:
            ac_temp_1 = ac_temp_1 + 1
    return ac_temp_1 / X.shape[0]
filename = "wine.txt"
original_data = np.loadtxt(filename,dtype=np.float32)
matrix_data = np.mat(original_data)
#初始化数据matrix_data
Y = matrix_data[:,0]#提取矩阵某一列
matrix_data = np.delete(matrix_data,0,1)#删除矩阵第一列
for i_1 in range(matrix_data.shape[0]):
for i_2 in range(matrix_data.shape[1]):
matrix_data[i_1,i_2] = matrix_data[i_1,i_2] / 60 #缩小,否则上溢
matrix_data = np.c_[np.mat(np.ones(matrix_data.shape[0])).transpose(),matrix_data]
matrix_data = matrix_data
#print(matrix_data)
#print(matrix_data)
#print(Y)
k = gradient_decent(matrix_data,Y,0.1)
ending = accuracy(matrix_data,Y,k)
print("accuracy for train data:")
print(ending)
#p = np.delete(matrix_data,177,0)#删除矩阵的某一行
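Dividing every feature by 60 is an ad-hoc way to keep w^T x small enough that `math.exp` does not overflow. A more common alternative is per-feature standardization (zero mean, unit variance); a minimal sketch assuming the same plain-text file and plain numpy arrays, not part of the original script:

import numpy as np

data = np.loadtxt("wine.txt", dtype=np.float64)
y = data[:, 0]                                  # first column: 0/1 label
X = data[:, 1:]                                 # remaining columns: features
X = (X - X.mean(axis=0)) / X.std(axis=0)        # z-score each feature column
X = np.c_[np.ones(X.shape[0]), X]               # prepend the bias column of ones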
UCI data: wine.txt
1 14.23 1.71 2.43 15.6 127 2.8 3.06 .28 2.29 5.64 1.04 3.90 1065
1 13.0 1.78 2.14 11.0 100 2.65 2.76 .26 1.28 4.38 1.05 3.4 1050
1 13.16 2.36 2.67 18.6 101 2.8 3.24 .3 2.81 5.68 1.03 3.17 1185
1 14.37 1.95 2.5 16.8 113 3.85 3.49 .24 2.18 7.8 .86 3.45 1480
1 13.24 2.59 2.87 21 118 2.8 2.69 .39 1.80 4.30 1.04 2.93 735
1 14.0 1.76 2.45 15.0 110 3.27 3.39 .34 1.97 6.75 1.05 2.85 1450
1 14.39 1.87 2.45 14.6 96 2.5 2.50 .3 1.98 5.25 1.00 3.58 1290
1 14.06 2.15 2.61 17.6 121 2.6 2.51 .31 1.25 5.05 1.06 3.58 1295
1 14.83 1.64 2.17 14 97 2.8 2.98 .29 1.98 5.0 1.08 2.85 1045
1 13.86 1.35 2.27 16 98 2.98 3.15 .20 1.85 7.20 1.01 3.55 1045
1 14.1 2.16 2.3 18 105 2.95 3.30 .20 2.38 5.75 1.25 3.17 1510
1 14.10 1.48 2.30 16.8 95 2.0 2.43 .26 1.57 5 1.17 2.80 1280
1 13.75 1.73 2.41 16 89 2.6 2.76 .29 1.81 5.6 1.15 2.9 1320
1 14.75 1.73 2.39 11.4 91 3.1 3.69 .43 2.81 5.4 1.25 2.73 1150
1 14.38 1.87 2.38 10 100 3.3 3.64 .29 2.96 7.5 1.0 3 1547
1 13.63 1.81 2.7 17.0 110 2.85 2.91 .3 1.46 7.3 1.28 2.88 1310
1 14.3 1.90 2.70 20 120 2.8 3.14 .33 1.97 6.0 1.07 2.65 1280
1 13.83 1.57 2.60 20 115 2.95 3.4 .4 1.70 6.6 1.13 2.57 1130
1 14.19 1.59 2.48 16.5 108 3.3 3.93 .30 1.86 8.7 1.23 2.80 1680
1 13.64 3.1 2.56 15.0 116 2.7 3.03 .17 1.66 5.1 .96 3.36 845
1 14.06 1.63 2.28 16 126 3 3.17 .24 2.1 5.65 1.09 3.71 780
1 12.93 3.8 2.65 18.6 100 2.41 2.41 .25 1.98 4.5 1.03 3.50 770
1 13.71 1.86 2.36 16.6 101 2.61 2.88 .27 1.69 3.8 1.11 4 1035
1 12.85 1.6 2.50 17.8 95 2.48 2.37 .26 1.46 3.93 1.09 3.63 1015
1 13.5 1.81 2.61 20 96 2.53 2.61 .28 1.66 3.50 1.10 3.80 845
1 13.05 2.05 3.20 25 124 2.63 2.68 .47 1.90 3.58 1.13 3.0 830
1 13.39 1.77 2.60 16.1 93 2.85 2.94 .34 1.45 4.8 .90 3.20 1195
1 13.3 1.70 2.14 17 94 2.4 2.19 .27 1.35 3.95 1.00 2.77 1285
1 13.87 1.9 2.8 19.4 107 2.95 2.97 .37 1.76 4.5 1.25 3.4 915
1 14.00 1.68 2.21 16 96 2.65 2.33 .26 1.98 4.7 1.04 3.59 1035
1 13.73 1.5 2.7 22.5 101 3 3.25 .29 2.38 5.7 1.19 2.71 1285
1 13.58 1.66 2.36 19.1 106 2.86 3.19 .20 1.95 6.9 1.09 2.88 1515
1 13.68 1.83 2.36 17.0 104 2.40 2.69 .40 1.97 3.84 1.23 2.87 990
1 13.76 1.53 2.7 19.5 130 2.95 2.74 .5 1.35 5.4 1.25 3 1235
1 13.51 1.8 2.65 19 110 2.35 2.53 .29 1.54 4.0 1.1 2.87 1095
1 13.48 1.81 2.41 20.5 100 2.7 2.98 .26 1.86 5.1 1.04 3.47 920
1 13.28 1.64 2.84 15.5 110 2.6 2.68 .34 1.36 4.6 1.09 2.78 880
1 13.05 1.65 2.55 18 98 2.45 2.43 .29 1.44 4.25 1.10 2.51 1105
1 13.07 1.5 2.1 15.5 98 2.4 2.64 .28 1.37 3.7 1.18 2.69 1020
1 14.20 3.99 2.51 13.0 128 3 3.04 .0 2.08 5.1 .89 3.53 760
1 13.56 1.71 2.31 16.0 117 3.15 3.29 .34 2.34 6.13 .95 3.38 795
1 13.41 3.84 2.10 18.8 90 2.45 2.68 .27 1.48 4.28 .91 3 1035
1 13.88 1.89 2.59 15 101 3.25 3.56 .17 1.7 5.43 .88 3.56 1095
1 13.24 3.98 2.29 17.5 103 2.64 2.63 .30 1.66 4.36 .80 3 680
1 13.05 1.77 2.1 17 107 3 3 .28 2.03 5.04 .88 3.35 885
1 14.21 4.04 2.44 18.9 111 2.85 2.65 .3 1.25 5.24 .87 3.33 1080
1 14.38 3.59 2.28 16 100 3.25 3.17 .27 2.19 4.9 1.04 3.44 1065
1 13.9 1.68 2.10 16 101 3.1 3.39 .21 2.14 6.1 .91 3.33 985
1 14.1 2.00 2.4 18.8 103 2.75 2.90 .30 2.38 6.0 1.07 2.75 1060
1 13.94 1.73 2.27 17.4 108 2.88 3.54 .30 2.08 8.90 1.10 3.1 1260
1 13.05 1.73 2.04 12.4 90 2.70 3.27 .17 2.91 7.0 1.10 2.91 1150
1 13.83 1.65 2.6 17.0 94 2.45 2.99 .20 2.29 5.6 1.24 3.37 1265
1 13.80 1.75 2.40 14 111 3.88 3.74 .30 1.87 7.05 1.01 3.26 1190
1 13.77 1.9 2.68 17.1 115 3 2.79 .39 1.68 6.3 1.13 2.93 1375
1 13.74 1.67 2.25 16.4 118 2.6 2.9 .21 1.60 5.85 .90 3.0 1060
1 13.56 1.73 2.46 20.5 116 2.96 2.78 .0 2.45 6.25 .98 3.03 1120
1 14.20 1.7 2.3 16.3 118 3.0 3 .26 2.03 6.38 .94 3.31 970
1 13.29 1.97 2.68 16.8 100 3 3.23 .31 1.66 6 1.07 2.84 1270
1 13.70 1.43 2.5 16.7 108 3.4 3.67 .19 2.04 6.8 .89 2.87 1285
0 12.37 .94 1.36 10.6 88 1.98 .57 .28 .40 1.95 1.05 1.80 520
0 12.33 1.1 2.28 16 101 2.05 1.09 .63 .41 3.27 1.25 1.67 680
0 12.64 1.36 2.00 16.8 100 2.00 1.41 .53 .60 5.75 .98 1.59 450
0 13.67 1.25 1.90 18 94 2.1 1.79 .30 .73 3.8 1.23 2.46 630
0 12.37 1.13 2.16 19 87 3.5 3.1 .19 1.87 4.45 1.20 2.87 420
0 12.17 1.45 2.53 19 104 1.89 1.75 .45 1.03 2.95 1.45 2.23 355
0 12.37 1.21 2.56 18.1 98 2.40 2.65 .37 2.08 4.6 1.19 2.3 678
0 13.11 1.01 1.7 15 78 2.98 3.18 .26 2.28 5.3 1.10 3.18 502
0 12.37 1.17 1.90 19.6 78 2.11 0 .27 1.04 4.68 1.10 3.48 510
0 13.34 .94 2.36 17 110 2.53 1.3 .55 .40 3.17 1.00 1.93 750
0 12.21 1.19 1.75 16.8 151 1.85 1.28 .14 2.5 2.85 1.28 3.07 718
0 12.29 1.61 2.21 20.4 103 1.1 1.00 .37 1.46 3.05 .906 1.80 870
0 13.86 1.51 2.67 25 86 2.95 2.86 .21 1.87 3.38 1.36 3.16 410
0 13.49 1.66 2.24 24 87 1.88 1.84 .27 1.03 3.74 .98 2.78 472
0 12.99 1.67 2.6 30 139 3.3 2.89 .21 1.96 3.35 1.31 3.5 985
0 11.96 1.09 2.3 21 101 3.38 2.14 .13 1.65 3.21 .99 3.13 886
0 11.66 1.88 1.90 16 97 1.61 1.57 .34 1.15 3.8 1.23 2.14 428
0 13.03 .9 1.71 16 86 1.95 2.03 .24 1.46 4.6 1.19 2.48 392
0 11.84 2.89 2.23 18 110 1.70 1.30 .43 .95 2.65 .96 2.50 500
0 12.33 .99 1.95 14.8 136 1.9 1.85 .35 2.76 3.4 1.06 2.31 750
0 12.7 3.87 2.4 23 101 2.83 2.55 .43 1.95 2.57 1.19 3.13 463
0 10 .90 0 19 86 2.40 2.26 .3 1.43 2.5 1.38 3.10 278
0 12.70 1.81 2.0 18.8 86 2.0 2.53 .26 1.77 3.9 1.16 3.14 714
0 12.08 1.13 2.51 24 78 0 1.58 .4 1.4 2.0 1.31 2.70 630
0 13.05 3.86 2.30 22.5 85 1.65 1.59 .61 1.60 4.8 .84 2.01 515
0 11.84 .89 2.58 18 94 2.0 2.21 .20 2.35 3.05 .79 3.08 520
0 12.67 .98 2.24 18 99 2.0 1.94 .3 1.46 2.60 1.23 3.16 450
0 12.16 1.61 2.31 22.8 90 1.78 1.69 .43 1.56 2.45 1.33 2.26 495
0 11.65 1.67 2.60 26 88 1.90 1.61 .4 1.34 2.6 1.36 3.21 562
0 11.64 2.06 2.46 21.6 84 1.95 1.69 .48 1.35 2.8 1 2.75 680
0 12.08 1.33 2.3 23.6 70 2.0 1.59 .40 1.38 1.74 1.07 3.21 625
0 12.08 1.83 2.30 18.5 81 1.6 1.5 .50 1.64 2.4 1.08 2.27 480
0 10 1.51 2.40 20 86 1.45 1.25 .5 1.63 3.6 1.05 2.65 450
0 12.69 1.53 2.26 20.7 80 1.38 1.46 .58 1.60 3.05 .96 2.06 495
0 12.29 2.83 2.20 18 88 2.45 2.25 .25 1.99 2.15 1.15 3.3 290
0 11.60 1.99 2.28 18 98 3.00 2.26 .17 1.35 3.25 1.16 2.96 345
0 12.47 1.50 2.0 19 160 2.5 2.27 .30 3.28 2.6 1.16 2.63 937
0 11.81 2.10 2.74 21.5 134 1.6 .99 .14 1.56 2.5 .95 2.26 625
0 12.29 1.41 1.98 16 85 2.55 2.5 .29 1.77 2.9 1.23 2.74 428
0 12.37 1.07 2.1 18.5 88 3.50 3.75 .24 1.95 4.5 1.04 2.77 660
0 12.29 3.17 2.21 18 88 2.85 2.99 .45 2.81 2.3 1.40 2.83 406
0 12.08 2.08 1.7 17.5 97 2.23 2.17 .26 1.4 3.3 1.27 2.96 710
0 12.6 1.34 1.9 18.5 88 1.45 1.36 .29 1.35 2.45 1.04 2.77 562
0 12.34 2.45 2.46 21 98 2.56 2.11 .34 1.31 2.8 .8 3.38 438
0 11.80 1.70 1.88 19.5 86 2.5 1.64 .37 1.40 2.06 .94 2.44 415
0 12.51 1.73 1.98 20.5 85 2.0 1.90 .30 1.48 2.94 1.04 3.57 672
0 12.40 2.55 2.27 20 90 1.68 1.84 .66 1.40 2.7 .86 3.3 315
0 12.25 1.73 2.10 19 80 1.65 2.03 .37 1.63 3.4 1 3.17 510
0 12.70 1.75 2.28 22.5 84 1.38 1.76 .48 1.63 3.3 .88 2.40 488
0 12.20 1.29 1.94 19 90 2.36 2.04 .39 2.08 2.7 .86 3.00 312
0 11.61 1.35 2.7 20 94 2.74 2.90 .29 2.49 2.65 .96 3.26 680
0 11.46 3.74 1.80 19.5 107 3.18 2.58 .24 3.58 2.9 .75 2.81 562
0 12.50 2.43 2.17 21 88 2.55 2.27 .26 1.20 0 .9 2.78 325
0 11.76 2.68 2.90 20 103 1.75 2.03 .6 1.05 3.8 1.23 2.5 607
0 11.41 .74 2.5 21 88 2.48 2.01 .40 1.44 3.08 1.1 2.31 434
0 12.08 1.39 2.5 22.5 84 2.56 2.29 .43 1.04 2.9 .93 3.19 385
0 11.03 1.51 2.0 21.5 85 2.46 2.17 .50 2.01 1.9 1.71 2.87 407
0 11.80 1.47 1.99 20.8 86 1.98 1.6 .3 1.53 1.95 .95 3.33 495
0 12.40 1.61 2.19 22.5 108 0 2.09 .34 1.61 2.06 1.06 2.96 345
0 12.77 3.43 1.98 16 80 1.63 1.25 .43 .83 3.4 .7 2.10 372
0 10 3.43 0 19 87 0 1.64 .37 1.87 1.28 .93 3.05 564
0 11.45 2.4 2.40 20 96 2.9 2.79 .30 1.83 3.25 .8 3.39 625
0 11.56 2.05 3.23 28.5 119 3.18 5.08 .47 1.87 6 .93 3.69 465
0 12.40 4.43 2.73 26.5 100 2.0 2.13 .43 1.71 2.08 .90 3.10 365
0 13.05 5.8 2.13 21.5 86 2.60 2.65 .3 2.01 2.6 .73 3.1 380
0 11.87 4.31 2.39 21 80 2.86 3.03 .21 2.91 2.8 .75 3.64 380
0 12.07 2.16 2.17 21 85 2.6 2.65 .37 1.35 2.76 .86 3.28 378
0 12.43 1.53 2.29 21.5 86 2.74 3.15 .39 1.77 3.94 .69 2.84 352
0 11.79 2.13 2.78 28.5 90 2.13 2.24 .58 1.76 3 .97 2.44 466
0 12.37 1.63 2.3 24.5 88 2.20 2.45 .4 1.9 2.10 .89 2.78 342
0 12.04 4.3 2.38 20 80 2.1 1.75 .40 1.35 2.6 .79 2.57 580
Written on January 21, 2020