changeset 91:d603ee579c3e
add linear regression
author      Jeff Hammel <k0scist@gmail.com>
date        Sun, 17 Dec 2017 14:25:13 -0800
parents     3ff05538259c
children    f1d1f2388fd6
files       tvii/linear_regression.py
diffstat    1 files changed, 130 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tvii/linear_regression.py	Sun Dec 17 14:25:13 2017 -0800
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Linear regression using tensorflow:
+
+W * x + b = y
+
+Yields `W` and `b`
+
+Ref:
+https://www.tensorflow.org/get_started/get_started
+"""
+
+# minimize:
+# (h(x) - y)**2
+# =>
+# (1./(2*m))*sum_i={1..m} (h(x_i) - y_i)**2
+# m -- number of training examples
+# =>
+# (1./(2*m))*sum_i={1..m} ((b + W*x_i) - y_i)**2
+# : the cost function; in this case, the squared error function
+
+import csv
+import json
+import os
+import sys
+import tensorflow as tf
+from .cli import CLIParser as Parser
+from .read import read
+from .transpose import transpose
+
+class LinearRegression(object):
+    """class-based approach to linear regression"""
+
+    def __init__(self, W_guess=0., b_guess=0.):
+        # model parameters: guesses
+        self.W = tf.Variable([W_guess], dtype=tf.float32)
+        self.b = tf.Variable([b_guess], dtype=tf.float32)
+        raise NotImplementedError('TODO')
+
+    def __call__(self, x_train, y_train, max_iterations=1000):
+        # Model input and output
+        x = tf.placeholder(tf.float32)
+        linear_model = self.W * x + self.b
+        y = tf.placeholder(tf.float32)
+
+        # loss
+        loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
+
+
+
+def linear_regression(x_train,
+                      y_train,
+                      W_guess=0.,
+                      b_guess=0.,
+                      max_iterations=1000):
+    """
+    perform a linear regression:
+
+    W*x + b
+
+    Returns:
+    (W_trained, b_trained, loss)
+    """
+
+    # model parameters: guesses
+    W = tf.Variable([W_guess], dtype=tf.float32)
+    b = tf.Variable([b_guess], dtype=tf.float32)
+
+    # Model input and output
+    x = tf.placeholder(tf.float32)
+    linear_model = W * x + b
+    y = tf.placeholder(tf.float32)
+
+    # loss
+    loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
+
+    # optimizer
+    optimizer = tf.train.GradientDescentOptimizer(0.01)
+    train = optimizer.minimize(loss)
+
+    # training loop
+    init = tf.global_variables_initializer()
+    sess = tf.Session()
+    sess.run(init)  # reset values to wrong
+    for i in range(max_iterations):
+        sess.run(train, {x: x_train, y: y_train})
+
+    # evaluate training accuracy
+    curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
+    return (curr_W[0], curr_b[0], curr_loss)
+
+
+def main(args=sys.argv[1:]):
+    """CLI"""
+
+    # parse command line
+    parser = Parser(description=__doc__)
+    parser.add_argument('data', type=read,
+                        help="(x,y) data to read")
+    parser.add_argument('--W', dest='W_guess',
+                        type=float, default=0.3,
+                        help="best guess for `W` [DEFAULT: %(default)s]")
+    parser.add_argument('--b', dest='b_guess',
+                        type=float, default=-0.3,
+                        help="best guess for `b` [DEFAULT: %(default)s]")
+    parser.add_argument('--iterations', '--max-iterations', dest='max_iterations',
+                        type=int, default=1000,
+                        help="maximum number of iterations [DEFAULT: %(default)s]")
+    options = parser.parse_args(args)
+
+    # training data
+    x_train, y_train = transpose(options.data)
+
+    # learn!
+    W, b, loss = linear_regression(x_train,
+                                   y_train,
+                                   options.W_guess,
+                                   options.b_guess,
+                                   max_iterations=options.max_iterations)
+
+    # recite your lesson
+    print(json.dumps(dict(W=float(W),
+                          b=float(b),
+                          loss=float(loss))))
+
+
+if __name__ == '__main__':
+    main()
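For reference, the cost function sketched in the comment block at the top of the file, written in conventional notation (note that the code actually minimizes the unscaled sum of squares, which has the same minimizer; the 1/(2*m) factor only rescales the loss and its gradient):

    J(W, b) = \frac{1}{2m} \sum_{i=1}^{m} \bigl( (W x_i + b) - y_i \bigr)^2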
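A minimal usage sketch of the `linear_regression` function added in this changeset, using the toy data from the referenced TensorFlow getting-started guide. It assumes a TensorFlow 1.x environment (the module relies on `tf.placeholder`, `tf.Session`, and `tf.train.GradientDescentOptimizer`, which are not in the default TensorFlow 2.x API), and the converged values shown are approximate:

    from tvii.linear_regression import linear_regression

    # toy data from the TensorFlow getting-started guide: y = -1 * x + 1
    x_train = [1., 2., 3., 4.]
    y_train = [0., -1., -2., -3.]

    # gradient descent (learning rate 0.01, 1000 iterations) should converge
    # to approximately W = -1, b = 1 with a loss near zero
    W, b, loss = linear_regression(x_train, y_train,
                                   W_guess=0.3, b_guess=-0.3,
                                   max_iterations=1000)
    print(W, b, loss)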