20170828
2017-08-28
Do_More
We're all in ML now.
Relearn, renew.
1.tensorflow input mnist data
# Import MNIST
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Load data
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
# Get the next batch of 64 images and labels
batch_X, batch_Y = mnist.train.next_batch(64)
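As a quick check (a minimal sketch, assuming the default read_data_sets split of 55,000 training and 10,000 test images), the loaded arrays can be inspected like this:
# Flattened 28x28 images and one-hot labels
print(X_train.shape)  # (55000, 784)
print(Y_train.shape)  # (55000, 10)
print(X_test.shape)   # (10000, 784)
print(Y_test.shape)   # (10000, 10)
print(batch_X.shape, batch_Y.shape)  # (64, 784) (64, 10)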
2.tensorflow hello world
import tensorflow as tf
hello = tf.constant('hello tensorflow!')
sess = tf.Session()
print(sess.run(hello))
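Note that under Python 3, sess.run(hello) returns a bytes object, so this prints b'hello tensorflow!'.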
3.tensorflow basic operations
import tensorflow as tf
a = tf.constant(2)
b = tf.constant(3)
with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(a + b))
    print(sess.run(a * b))
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)
add = tf.add(a, b)
mul = tf.multiply(a, b)
with tf.Session() as sess:
    print(sess.run(add, feed_dict={a: 2, b: 3}))
    print(sess.run(mul, feed_dict={a: 2, b: 3}))
matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    result = sess.run(product)
    print(result)
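Multiplying the 1x2 matrix [[3, 3]] by the 2x1 matrix [[2], [2]] gives a 1x1 matrix, so result prints as [[12]].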
4.nearest neighbor
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist/",one_hot=True)
# Take 5000 training samples and 200 test samples
Xtr, Ytr = mnist.train.next_batch(5000)
Xte, Yte = mnist.test.next_batch(200)
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])
# L1 distance between the test sample and every training sample
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Index of the nearest neighbor (minimum distance)
pred = tf.arg_min(distance, 0)
accuracy = 0
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(len(Xte)):
        # Nearest training sample for the i-th test sample
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i]))
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)
    print("Done!")
    print("Accuracy:", accuracy)
next_batch: returns the next batch of the given number of samples
tf.negative: element-wise negation
tf.add: element-wise addition
tf.abs: element-wise absolute value
tf.reduce_sum: sums the elements of a tensor
reduction_indices = 1: reduce along axis 1, i.e. sum over the elements of each row (see the sketch after this list)
tf.arg_min: returns the index of the smallest element (here, the nearest training sample)
np.argmax: returns the index of the largest element, which turns a one-hot label back into its class
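To see what the distance line is doing, here is a tiny NumPy sketch (made-up 3-feature samples, not MNIST) of the same L1-distance computation; reduction_indices=1 in TensorFlow corresponds to axis=1 here:
import numpy as np

# Two hypothetical "training" samples with 3 features each
Xtr_small = np.array([[1., 2., 3.],
                      [4., 5., 6.]])
xte_small = np.array([1., 1., 1.])

# Sum of absolute differences over each row (axis 1) -> one L1 distance per sample
dist = np.sum(np.abs(Xtr_small - xte_small), axis=1)  # [3., 12.]
nn_index = np.argmin(dist)                            # 0, the closest training sample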
5.tensorflow linear regression
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
learning_rate = 0.01
training_epochs = 1000
display_step = 50
train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                         2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53,
                         1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]
X = tf.placeholder("float")
Y = tf.placeholder("float")
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
pred = tf.add(tf.multiply(X, W), b) # x * w + b
cost = tf.reduce_sum(tf.pow(pred - Y, 2))  # sum of squared errors (pred - Y)^2
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c), "W=", sess.run(W), "b=", sess.run(b))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
epoch: one complete pass over the entire training set
numpy.asarray: converts the input data into a NumPy array
numpy.shape[0]: the length of the array's first dimension (here, the number of samples)
numpy.random.randn: draws random numbers from the standard normal distribution
tf.train.GradientDescentOptimizer(learning_rate).minimize(cost): applies gradient descent with the given learning rate to minimize the cost
Gradient descent: to find a local minimum of a function, iterate from the current point by taking fixed-size steps in the direction opposite to the gradient (or an approximate gradient) at that point (see the sketch after this list)
zip: takes a series of iterables as arguments and packs their corresponding elements into tuples
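To make the last two notes concrete, here is a minimal NumPy sketch (an illustration, not the code above) of what a single GradientDescentOptimizer step does for this model: for the per-sample cost (w*x + b - y)^2, the gradient is 2*(w*x + b - y)*x with respect to w and 2*(w*x + b - y) with respect to b, and each parameter moves against its gradient by learning_rate. The helper name gd_step is made up for the example.
import numpy as np

def gd_step(w, b, x, y, learning_rate=0.01):
    # One gradient-descent update for the per-sample cost (w*x + b - y)^2
    error = w * x + b - y
    grad_w = 2 * error * x   # d(cost)/dw
    grad_b = 2 * error       # d(cost)/db
    w -= learning_rate * grad_w
    b -= learning_rate * grad_b
    return w, b

# Usage: sweep over (x, y) pairs, just like zip(train_X, train_Y) above
w, b = np.random.randn(), np.random.randn()
for x, y in zip([3.3, 4.4, 5.5], [1.7, 2.76, 2.09]):
    w, b = gd_step(w, b, x, y)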