Olivier-Truong committed
Commit b31acbe · 1 Parent(s): 61d4547
Files changed (1):
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ # -*- coding: utf-8 -*-
+
+ import torch
+ import math
+
+
+ dtype = torch.float
+ device = torch.device("cpu")
+ # device = torch.device("cuda:0")  # Uncomment this to run on GPU
+
+ # Create input and output data: 2000 points of y = sin(x) on [-pi, pi]
+ x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
+ y = torch.sin(x)
+
+ # Randomly initialize the four polynomial coefficients
+ a = torch.randn((), device=device, dtype=dtype)
+ b = torch.randn((), device=device, dtype=dtype)
+ c = torch.randn((), device=device, dtype=dtype)
+ d = torch.randn((), device=device, dtype=dtype)
+
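+ # Note: randn(()) yields 0-dim (scalar) tensors, so a, b, c, d broadcast
+ # against the 1-D tensor x in the polynomial below.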
+ learning_rate = 1e-6
+ for t in range(2000):
+     # Forward pass: compute predicted y = a + b x + c x^2 + d x^3
+     y_pred = a + b * x + c * x ** 2 + d * x ** 3
+
+     # Compute the sum-of-squares loss and print it every 100 steps
+     loss = (y_pred - y).pow(2).sum().item()
+     if t % 100 == 99:
+         print(t, loss)
+
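+     # Chain rule, made explicit: with loss L = sum((y_pred - y)^2),
+     # dL/dy_pred = 2 * (y_pred - y); y_pred is linear in each coefficient,
+     # so dL/da = grad_y_pred.sum(), dL/db = (grad_y_pred * x).sum(), etc.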
+     # Backprop to compute gradients of a, b, c, d with respect to loss
+     grad_y_pred = 2.0 * (y_pred - y)
+     grad_a = grad_y_pred.sum()
+     grad_b = (grad_y_pred * x).sum()
+     grad_c = (grad_y_pred * x ** 2).sum()
+     grad_d = (grad_y_pred * x ** 3).sum()
+
+     # Update weights using gradient descent
+     a -= learning_rate * grad_a
+     b -= learning_rate * grad_b
+     c -= learning_rate * grad_c
+     d -= learning_rate * grad_d
+
+
+ print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')