# -*- coding: utf-8 -*-
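"""Fit a third-degree polynomial to sin(x) with plain gradient descent on
PyTorch tensors, and serve the training log as a small Flask web page."""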

import torch
import math
import sys
from flask import Flask


app = Flask(__name__)
app.config['data'] = ""  # accumulated log text rendered at "/"

# Shadow the built-in print so each message is written to stdout and also
# appended to the HTML log served by the Flask app below.
def print(data):
    app.config['data'] += data + "<br>"
    sys.stdout.write(data + "\n")
    sys.stdout.flush()

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU

# Create random input and output data
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

# Randomly initialize weights
a = torch.randn((), device=device, dtype=dtype)
b = torch.randn((), device=device, dtype=dtype)
c = torch.randn((), device=device, dtype=dtype)
d = torch.randn((), device=device, dtype=dtype)
print(f"{a} {b} {c} {d}")
learning_rate = 1e-6
for t in range(2000):
    # Forward pass: compute predicted y
    y_pred = a + b * x + c * x ** 2 + d * x ** 3

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum().item()
    if t % 100 == 99:
        print(f"{t} {loss}")

    # Backprop to compute gradients of a, b, c, d with respect to loss
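    # With loss = sum((y_pred - y)**2), the chain rule gives:
    #   dloss/da = sum(2*(y_pred - y))
    #   dloss/db = sum(2*(y_pred - y) * x)
    #   dloss/dc = sum(2*(y_pred - y) * x**2)
    #   dloss/dd = sum(2*(y_pred - y) * x**3)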
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()

    # Update weights using gradient descent
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d


print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')

# Serve the accumulated training log as a simple HTML page.
@app.route('/')
def index():
    return app.config["data"]

app.run(host="0.0.0.0", port=7860)  # listen on all interfaces; 7860 is the port Hugging Face Spaces expects