Linear regression with TensorFlow

Placeholder

foo(a,b){ .. }

실행하는 시점에서 아래와 같이 값을 결정하게 할 수 있다.

# Placeholders defer supplying concrete values until run time (via feed_dict),
# much like function parameters.
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

# Define some operations on the placeholders.
add = tf.add(a, b)
# NOTE: tf.mul was removed in TensorFlow 1.0 and renamed tf.multiply.
mul = tf.multiply(a, b)

with tf.Session() as sess:
    # Feed concrete values for the placeholders at execution time.
    print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3}))
    print("Multiplication with variables: %d" % sess.run(mul, feed_dict={a: 2, b: 3}))
Addition with variables: 5
Multiplication with variables: 6

Linear regression

$$H(x) = Wx + b$$

$$cost(H(x)) = \frac{1}{m} \sum_{i=1}^{m}{(H(x^{(i)})-y^{(i)})^2}$$

Variable로 지정해야 tensorflow가 update를 할 수 있게 된다.

import tensorflow as tf

# Training data: the model should learn y = 1 * x + 0.
x_data = [1, 2, 3]
y_data = [1, 2, 3]

# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 1 and b 0, but TensorFlow will figure that out for us.)

# Trainable parameters must be tf.Variable so the optimizer can update them.
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

# Our hypothesis: H(x) = W * x + b
hypothesis = W * x_data + b

# Simplified cost function: mean squared error over the training set.
cost = tf.reduce_mean(tf.square(hypothesis - y_data))

# Minimize cost with plain gradient descent.
learning_rate = 0.1  # alpha; a constant is enough, it need not be a Variable
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# Before starting, initialize the variables.  We run this op first.
# NOTE: tf.initialize_all_variables is deprecated since TF 0.12;
# tf.global_variables_initializer is the replacement.
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the line.
for step in range(2001):
    sess.run(train)
    if step % 100 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))

# Release session resources when done.
sess.close()

result

0 0.0695987 [ 1.0891968] [ 0.07517025]
100 1.00123e-06 [ 0.99883789] [ 0.00264173]
200 7.70968e-09 [ 0.99989802] [ 0.0002318]
300 5.84587e-11 [ 0.99999106] [  2.03111595e-05]
400 4.73695e-13 [ 0.99999917] [  1.74629804e-06]
500 4.73695e-15 [ 0.99999994] [  1.88630892e-07]
600 0.0 [ 1.] [  5.35269820e-08]
700 0.0 [ 1.] [  5.35269820e-08]
800 0.0 [ 1.] [  5.35269820e-08]
900 0.0 [ 1.] [  5.35269820e-08]
1000 0.0 [ 1.] [  5.35269820e-08]
1100 0.0 [ 1.] [  5.35269820e-08]
1200 0.0 [ 1.] [  5.35269820e-08]
1300 0.0 [ 1.] [  5.35269820e-08]
1400 0.0 [ 1.] [  5.35269820e-08]
1500 0.0 [ 1.] [  5.35269820e-08]
1600 0.0 [ 1.] [  5.35269820e-08]
1700 0.0 [ 1.] [  5.35269820e-08]
1800 0.0 [ 1.] [  5.35269820e-08]
1900 0.0 [ 1.] [  5.35269820e-08]
2000 0.0 [ 1.] [  5.35269820e-08]

with placeholder.
reuse가 가능하다.

import tensorflow as tf

# Training data: the model should learn y = 1 * x + 0.
x_data = [1, 2, 3]
y_data = [1, 2, 3]

# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 1 and b 0, but TensorFlow will figure that out for us.)

# Trainable parameters must be tf.Variable so the optimizer can update them.
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

# Placeholders make the graph reusable: the same hypothesis can later be
# evaluated on arbitrary inputs, not just the training data.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Our hypothesis: H(x) = W * x + b
hypothesis = W * X + b

# Simplified cost function: mean squared error against the fed-in labels.
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Minimize cost with plain gradient descent.
learning_rate = 0.1  # alpha; a constant is enough, it need not be a Variable
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# Before starting, initialize the variables.  We run this op first.
# NOTE: tf.initialize_all_variables is deprecated since TF 0.12;
# tf.global_variables_initializer is the replacement.
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the line, feeding the training data through the placeholders.
for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 100 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}),
              sess.run(W), sess.run(b))

result

0 0.0671113 [ 1.31180656] [-0.57569945]
100 0.000394039 [ 1.02305508] [-0.05240955]
200 3.03385e-06 [ 1.00202298] [-0.00459877]
300 2.33703e-08 [ 1.0001775] [-0.00040349]
400 1.7819e-10 [ 1.00001562] [ -3.54349249e-05]
500 1.51227e-12 [ 1.00000143] [ -3.18881439e-06]
600 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
700 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
800 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
900 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1000 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1100 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1200 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1300 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1400 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1500 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1600 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1700 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1800 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
1900 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]
2000 3.78956e-14 [ 1.00000036] [ -6.25814607e-07]

이렇게 한번 model을 생성 했다면,
생성된 hypothesis를 이용해서 특정 X에 대한 예측값을 출력할 수 있게 된다.

# Learns best fit is W: [1], b: [0]

# Reuse the trained hypothesis to predict for new inputs via the placeholder.
print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))

결과

[ 5.00000143]
[ 2.50000024]


+ Recent posts