배고픈 개발자 이야기

[2021/09/06] DNN_CNN (선형회귀 분석) 본문

인포섹 아카데미

[2021/09/06] DNN_CNN (선형회귀 분석)

이융희 2021. 9. 6. 17:48
728x90

import numpy as np

# Three training samples for fitting a line y ≈ w*x + b.
X = np.array([1, 2, 3], dtype="float32")
y = np.array([2, 2.5, 3.5], dtype="float32")

# Initial parameter guesses for slope and intercept.
w, b = 2, 1
hypothesis = X * w + b

# One full (unscaled) gradient-descent step on the mean squared error:
#   d(MSE)/dw = mean((pred - y) * x),  d(MSE)/db = mean(pred - y)
grad_w = 1/3*np.dot((hypothesis - y), X.T)
w = w - grad_w

grad_b = 1/3*np.sum(hypothesis - y)
b = b - grad_b

print("w, b")
print(grad_w, grad_b)
print(w, b)
print()
print()

# Search for w, b that minimize the squared error over 100 iterations.
# NOTE(review): no learning rate is applied here, so each step takes the
# full gradient; with this data the iterates oscillate/diverge — the next
# section repeats the loop with learning_rate = 0.1 and converges.
for i in range(100):
  hypothesis = X*w+b
  # Fixed typo in the log label: was "hyp[othesis:".
  print("hypothesis:",hypothesis)
  # Sum (not mean) of squared errors; the gradients below use the mean (1/3).
  MSE = np.sum((hypothesis-y)**2)
  print("MSE:",MSE)
  w_gred = 1/3*np.dot((hypothesis-y), X.T)
  print("w_gred:",w_gred)
  b_gred = 1/3*np.sum(hypothesis-y)
  print("b_gred:",b_gred)
  w = w - w_gred
  b = b - b_gred
  print("w:",w," b:",b)

 

 

 

# Reset the parameters and repeat, this time damping each step by a
# learning rate so the iterates converge instead of oscillating.
w, b = 2, 1
learning_rate = 0.1

for step in range(1000):
  hypothesis = X*w+b
  print("hypothesis:",hypothesis)
  # Sum of squared errors, logged each step to watch it shrink.
  MSE = np.sum((hypothesis-y)**2)
  print("MSE:",MSE)
  # Mean-gradient of the squared error w.r.t. w and b.
  w_gred = 1/3*np.dot((hypothesis-y), X.T)
  b_gred = 1/3*np.sum(hypothesis-y)
  print("b_gred:",b_gred)
  # Scaled gradient-descent updates.
  w = w-(learning_rate*w_gred)
  print("w:",w)
  b = b-(learning_rate*b_gred)
  print("b:",b)

 

 

 

import matplotlib.pyplot as plt

# Scatter the training points, then overlay the final fitted line
# (hypothesis holds the predictions from the last training step above).
plt.plot(X, y, marker='o')
plt.plot(X, hypothesis, mfc='r', ls="-")
plt.ylim(0, 4)

 

 

TensorFlow

 

# Colab/IPython magic that pins the TensorFlow 1.x runtime.
# NOTE(review): this line is not valid plain Python — it only works inside
# a notebook cell; remove or comment it out to run as a .py script.
%tensorflow_version 1.x

import tensorflow as tf

# TF 1.x builds graphs lazily by default; enable eager mode so tensors
# evaluate immediately (eager is the default in TF 2.x).
tf.enable_eager_execution()
tf.__version__
# 1.15.2

# Same regression problem as the NumPy section, now with TensorFlow
# variables and automatic differentiation.
X = np.array([1, 2, 3], dtype="float32")
Y = np.array([2, 2.5, 3.5], dtype="float32")
# Trainable slope and intercept, seeded with the same initial guesses.
W = tf.Variable([2], dtype="float32")
b = tf.Variable([1], dtype="float32")

for step in range(1000):
  # Record the forward pass so gradients of cost w.r.t. W and b can be
  # computed afterwards.
  with tf.GradientTape() as tape:
    hypothesis = W*X + b
    print("hypothesis:",hypothesis)
    # Mean of the squared prediction errors.
    cost = tf.reduce_mean(tf.square(hypothesis - Y))
    print("cost:",cost)
  # Differentiate the recorded cost with respect to both parameters.
  W_grad, b_grad = tape.gradient(cost, [W, b])
  print("W_grad:", W_grad, ":b_grad:", b_grad)
  # In-place gradient-descent updates; learning_rate comes from the
  # NumPy section earlier in the file.
  W.assign_sub(W_grad*learning_rate)
  b.assign_sub(b_grad*learning_rate)

 

# Scatter the training points and overlay the TF-fitted line; .numpy()
# converts the eager tensor of predictions to a NumPy array for plotting.
plt.plot(X, Y, marker='o')
plt.plot(X, hypothesis.numpy(), mfc='r', ls="-")
plt.ylim(0,4)

Comments