
1. Hypothesis and cost function

The basic principle and structure of the cost function.
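Here the hypothesis is the straight line H(x) = Wx + b, and the cost function is the mean squared error cost(W, b) = (1/m) * SUM_i (H(x_i) - y_i)^2 over the m training examples. As a quick illustration of what that formula computes, here is a minimal plain-Python sketch of the same cost (my own sketch; the data mirrors the TensorFlow code below):

x_train = [1, 2, 3]
y_train = [1, 2, 3]

def cost(W, b):
    # mean squared error of the line H(x) = W*x + b over the training data
    m = len(x_train)
    return sum((W * x + b - y) ** 2 for x, y in zip(x_train, y_train)) / m

print(cost(1.0, 0.0))  # 0.0 -- the true line y = x has zero cost
print(cost(0.5, 0.5))  # positive cost for a worse guess

Gradient descent searches for the W and b that push this cost toward its minimum.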




2. TensorFlow practice - 1

import tensorflow as tf
tf.set_random_seed(777) # for reproducibility

# X and Y training data

x_train = [1, 2, 3]
y_train = [1, 2, 3]


# Try to find values for W and b to compute y_data = x_data * W + b
# We know that W should be 1 and b should be 0

# But let TensorFlow figure it out. A tf.Variable is different from an ordinary
# variable: think of it as a value that TensorFlow itself updates during training.

[1] means a rank-1 (one-dimensional) shape, i.e. a tensor holding a single value.

W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
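A one-line check of that shape (my own aside, using the same TF 1.x API as the rest of this post):

import tensorflow as tf

W = tf.Variable(tf.random_normal([1]), name='weight')
print(W.shape)  # (1,) -- a rank-1 tensor holding a single random value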

# Our hypothesis H(x) = XW + b

hypothesis = x_train * W + b


# cost/loss function: reduce_mean averages the elements of a given tensor.


cost = tf.reduce_mean(tf.square(hypothesis - y_train))
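A quick sanity check of reduce_mean (a small aside of mine, not part of the lab code):

import tensorflow as tf

with tf.Session() as s:
    print(s.run(tf.reduce_mean([1., 2., 3., 4.])))  # 2.5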

# Minimize (Gradient Descent): the optimizer adjusts the Variables on its own to reduce cost.

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

train = optimizer.minimize(cost)  # minimize the cost
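Under the hood, minimize applies the gradient descent update W := W - learning_rate * d(cost)/dW. Here is a hand-rolled sketch of that update for W alone (b left out for brevity; my own illustration, not part of the lab code):

import tensorflow as tf

x_data = [1., 2., 3.]
y_data = [1., 2., 3.]
W = tf.Variable(5.0)

hypothesis = x_data * W
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
# d(cost)/dW = mean(2 * (W*x - y) * x)
gradient = tf.reduce_mean(2 * (hypothesis - y_data) * x_data)
update = W.assign(W - 0.01 * gradient)

with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    for step in range(5):
        print(step, s.run([update, cost]))  # W moves toward 1.0 and cost shrinks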

# Launch the graph in a session (this creates the session).

sess = tf.Session()
# Initializes global variables in the graph (TensorFlow initializes the Variables here).
sess.run(tf.global_variables_initializer())
# Fit the line
for step in range(2001):
    sess.run(train)  # running train walks down into the graph (this is where the session executes)

    # print once every 20 steps
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))


Running train walks down into the graph in the order train -> cost -> hypothesis -> (1) W, (2) b: each node pulls in the nodes it depends on.
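The same pull-based evaluation can be seen on a tiny graph (my own illustration): fetching one node forces every node it depends on to be computed first.

import tensorflow as tf

a = tf.constant(2.0)
b = tf.constant(3.0)
c = a * b  # c depends on a and b

with tf.Session() as s:
    print(s.run(c))  # 6.0 -- running c evaluates a and b along the way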

* Result: each line prints step, cost, W, b in that order.

(0, 9.220862, array([-0.82039154], dtype=float32), array([0.9928319], dtype=float32))

(20, 0.3749161, array([0.25959158], dtype=float32), array([1.3836247], dtype=float32))

(40, 0.26797462, array([0.38907704], dtype=float32), array([1.3602536], dtype=float32))

(60, 0.24272116, array([0.42680436], dtype=float32), array([1.300293], dtype=float32))

(80, 0.22043735, array([0.454601], dtype=float32), array([1.2395616], dtype=float32))

(100, 0.20020457, array([0.48031452], dtype=float32), array([1.1813425], dtype=float32))

(120, 0.18182898, array([0.50474566], dtype=float32), array([1.1258272], dtype=float32))

(140, 0.16514005, array([0.5280216], dtype=float32), array([1.0729178], dtype=float32))

(160, 0.1499828, array([0.55020285], dtype=float32), array([1.0224946], dtype=float32))

(180, 0.1362167, array([0.5713418], dtype=float32), array([0.974441], dtype=float32))

(200, 0.123714216, array([0.5914871], dtype=float32), array([0.92864573], dtype=float32))

(220, 0.1123592, array([0.6106857], dtype=float32), array([0.88500285], dtype=float32))

(240, 0.10204645, array([0.62898207], dtype=float32), array([0.8434109], dtype=float32))

(260, 0.09268019, array([0.6464187], dtype=float32), array([0.8037737], dtype=float32))

(280, 0.08417361, array([0.66303575], dtype=float32), array([0.76599914], dtype=float32))

(300, 0.07644776, array([0.67887175], dtype=float32), array([0.7299999], dtype=float32))

(320, 0.06943108, array([0.6939636], dtype=float32), array([0.6956925], dtype=float32))

(340, 0.06305844, array([0.70834625], dtype=float32), array([0.6629976], dtype=float32))

(360, 0.05727069, array([0.7220529], dtype=float32), array([0.6318391], dtype=float32))

(380, 0.05201419, array([0.7351154], dtype=float32), array([0.602145], dtype=float32))

(400, 0.04724005, array([0.747564], dtype=float32), array([0.5738462], dtype=float32))

(420, 0.042904194, array([0.75942755], dtype=float32), array([0.5468776], dtype=float32))

(440, 0.038966298, array([0.77073365], dtype=float32), array([0.52117646], dtype=float32))

(460, 0.035389785, array([0.78150827], dtype=float32), array([0.4966831], dtype=float32))

(480, 0.0321416, array([0.7917766], dtype=float32), array([0.47334078], dtype=float32))

(500, 0.029191487, array([0.8015623], dtype=float32), array([0.45109546], dtype=float32))

(520, 0.026512176, array([0.8108882], dtype=float32), array([0.42989555], dtype=float32))



3. TensorFlow practice - 2


An example that feeds the training data through a placeholder instead of declaring it directly.


# Lab 2 Linear Regression

import tensorflow as tf

tf.set_random_seed(777) # for reproducibility
# Try to find values for W and b to compute y_data = W * x_data + b
# We know that W should be 1 and b should be 0
# But let's use TensorFlow to figure it out
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Now we can use X and Y in place of x_data and y_data
# Placeholders for tensors that will always be fed using feed_dict
# See http://stackoverflow.com/questions/36693740/
X = tf.placeholder(tf.float32, shape=[None])

Y = tf.placeholder(tf.float32, shape=[None])

# The shape can be given as [None], so inputs of any length can be fed (see the short sketch after this listing).

# Our hypothesis XW+b
hypothesis = X * W + b
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
# Fit the line
for step in range(2001):
    cost_val, W_val, b_val, _ = \
        sess.run([cost, W, b, train],
                 feed_dict={X: [1, 2, 3], Y: [1, 2, 3]})
    if step % 20 == 0:
        print(step, cost_val, W_val, b_val)
# Learns best fit W:[ 1.], b:[ 0]
'''
...
1980 1.32962e-05 [ 1.00423515] [-0.00962736]
2000 1.20761e-05 [ 1.00403607] [-0.00917497]
'''
# Testing our model
print(sess.run(hypothesis, feed_dict={X: [5]}))
print(sess.run(hypothesis, feed_dict={X: [2.5]}))
print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
'''
[ 5.0110054]
[ 2.50091505]
[ 1.49687922 3.50495124]
'''
# Fit the line with new training data
for step in range(2001):
    cost_val, W_val, b_val, _ = \
        sess.run([cost, W, b, train],
                 feed_dict={X: [1, 2, 3, 4, 5],
                            Y: [2.1, 3.1, 4.1, 5.1, 6.1]})
    # the data is handed over as plain lists through feed_dict; nothing is declared up front
    if step % 20 == 0:
        print(step, cost_val, W_val, b_val)



# Testing our model with the newly learned W and b
print(sess.run(hypothesis, feed_dict={X: [5]}))
print(sess.run(hypothesis, feed_dict={X: [2.5]}))
print(sess.run(hypothesis, feed_dict={X: [1.5, 3.5]}))
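As promised above, here is a short sketch of why shape=[None] matters: the same placeholder accepts lists of any length (my own aside, separate from the lab code):

import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[None])
doubled = X * 2

with tf.Session() as s:
    print(s.run(doubled, feed_dict={X: [1, 2, 3]}))        # [2. 4. 6.]
    print(s.run(doubled, feed_dict={X: [1, 2, 3, 4, 5]}))  # [ 2.  4.  6.  8. 10.]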

4. Final summary



This is the TensorFlow Mechanics structure behind the exercises above:


1. Build the TensorFlow graph.


2. Feed the values X and Y through feed_dict and run the graph.


3. The run returns the computed values of W and b.
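Putting the three steps together in one minimal sketch (mine, using the same TF 1.x API as the labs above):

import tensorflow as tf

# 1. Build the graph
X = tf.placeholder(tf.float32, shape=[None])
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = X * W + b

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 2. Feed values for X through feed_dict and run the graph
    # 3. sess.run returns the computed values of the fetched nodes
    print(sess.run(hypothesis, feed_dict={X: [1, 2, 3]}))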

