Hi,
I’m reading the book Programming Machine Learning and trying to translate the examples into Elixir using Nx.
Here is the code I exported from my Livebook.
Linear Regression
# Notebook dependencies: Nx for tensors, Axon for ML, Explorer for
# dataframes, and Kino/VegaLite for Livebook rendering and plotting.
Mix.install([
{:nx, "~> 0.4.2"},
{:axon, "~> 0.4.1"},
{:explorer, "~> 0.5.0"},
{:kino, "~> 0.8.0"},
{:vega_lite, "~> 0.1.6"},
{:kino_vega_lite, "~> 0.1.7"}
])
Data
# Raw dataset from the book: daily restaurant reservations vs. pizzas sold.
# Kept as an inline CSV string; parsed by Explorer below.
csv = """
Reservations,Pizzas
13,33
2,16
14,32
23,51
13,27
1,16
18,34
10,17
26,29
3,15
3,15
21,32
7,22
22,37
2,13
27,44
6,16
10,21
18,37
15,30
9,26
26,34
8,23
15,39
10,27
21,37
5,17
6,18
13,25
13,23
"""
# Parse the inline CSV into an Explorer DataFrame; the match on {:ok, _}
# asserts that parsing succeeded (a MatchError here means bad CSV).
{:ok, data} = Explorer.DataFrame.load_csv(csv)

# Pull out the two columns as Explorer series for plotting and training.
reserv = data["Reservations"]
pizzas = data["Pizzas"]
Let’s plot the actual values alongside our first attempt at a model.
# Interactive weight input so the slope of the hand-made model line can be
# tweaked from the Livebook UI (read once per evaluation, default "1").
weight_text = Kino.Input.text("Weight", default: "1")
alias VegaLite, as: Vl
# Parse the text input as a float; Float.parse returns {value, rest}.
{weight, _} = Float.parse(Kino.Input.read(weight_text))
# x-values for the model line: 1 up to the largest reservation count.
model = Explorer.DataFrame.new(iter: 1..Explorer.Series.max(reserv))
# Scatter plot of the observed (reservations, pizzas) points.
chart1 =
Vl.new()
|> Vl.data_from_values(data, only: ["Reservations", "Pizzas"])
|> Vl.mark(:point)
|> Vl.encode_field(:x, "Reservations", type: :quantitative)
|> Vl.encode_field(:y, "Pizzas", type: :quantitative)
# Line for the naive model y = weight * x, computed inside Vega-Lite via
# a `calculate` transform (the "weights" field is created by the transform,
# not taken from the dataframe).
chart2 =
Vl.new()
|> Vl.data_from_values(model, only: ["iter", "weights"])
|> Vl.transform(calculate: "datum.iter * #{weight}", as: "weights")
|> Vl.mark(:line)
|> Vl.encode_field(:x, "iter", type: :quantitative)
|> Vl.encode_field(:y, "weights", type: :quantitative)
# Overlay the scatter and the model line in a single layered chart.
combined =
Vl.new(width: 400, height: 300)
|> Vl.layers([chart1, chart2])
This is how VegaLite creates a linear regression.
# For comparison: let Vega-Lite fit the linear regression itself using its
# built-in `regression` transform, layered over the raw scatter points.
Vl.new(width: 400, height: 300)
|> Vl.data_from_values(
reservations: Explorer.Series.to_list(reserv),
pizzas: Explorer.Series.to_list(pizzas)
)
|> Vl.layers([
Vl.new()
|> Vl.mark(:point, filled: true)
|> Vl.encode_field(:x, "reservations", type: :quantitative)
|> Vl.encode_field(:y, "pizzas", type: :quantitative),
Vl.new()
|> Vl.mark(:line, color: :firebrick)
|> Vl.transform(regression: "pizzas", on: "reservations")
|> Vl.encode_field(:x, "reservations", type: :quantitative)
|> Vl.encode_field(:y, "pizzas", type: :quantitative)
])
Predict and Loss
defmodule MachineLearning do
  @moduledoc """
  Simple linear regression (`y = w * x + b`) trained with the naive
  hill-climbing search from "Programming Machine Learning".
  """

  @doc """
  Predicts the outputs for tensor `x` as `x * w + b` (element-wise).
  """
  def predict(x, w, b) do
    x
    |> Nx.multiply(w)
    |> Nx.add(b)
  end

  @doc """
  Mean squared error between the predictions for `x` and the targets `y`.

  Returns a plain Elixir number (via `Nx.to_number/1`) rather than a
  0-dimensional tensor. This is the crucial fix: comparing `%Nx.Tensor{}`
  structs with Kernel `<` uses Erlang term ordering (it compares the struct
  maps field by field), not the numeric values, so every `cond` branch in
  `train/4` was effectively random and training halted after a few steps.
  """
  def loss(x, y, w, b) do
    predict(x, w, b)
    |> Nx.subtract(y)
    |> Nx.power(2)
    |> Nx.mean()
    |> Nx.to_number()
  end

  @doc """
  Trains `w` and `b` for up to `iter` iterations with step size `lr`.

  Starting from `{0.0, 0.0}`, each iteration tries nudging `w` or `b` by
  `±lr` and keeps the first move that lowers the loss; when no move helps,
  the search halts early and returns the current `{w, b}`.
  """
  def train(x, y, iter, lr) do
    Enum.reduce_while(1..iter, {0.0, 0.0}, fn i, {w, b} ->
      current_loss = loss(x, y, w, b)
      IO.puts("Iter ##{i} => Loss: #{inspect(current_loss)}")

      # Now that loss/4 returns a number, these comparisons are numeric.
      cond do
        loss(x, y, w + lr, b) < current_loss ->
          {:cont, {w + lr, b}}

        loss(x, y, w - lr, b) < current_loss ->
          {:cont, {w - lr, b}}

        loss(x, y, w, b + lr) < current_loss ->
          {:cont, {w, b + lr}}

        loss(x, y, w, b - lr) < current_loss ->
          {:cont, {w, b - lr}}

        true ->
          {:halt, {w, b}}
      end
    end)
  end
end
# Convert the Explorer series to Nx tensors and run the training loop
# for up to 50 iterations with a learning rate of 0.1.
reserv_t = reserv |> Explorer.Series.to_list() |> Nx.tensor()
pizzas_t = pizzas |> Explorer.Series.to_list() |> Nx.tensor()

MachineLearning.train(reserv_t, pizzas_t, 50, 0.1)
I could not make the training part work properly — it always stops after only a few iterations. Where do you think I went wrong? (Hint found while reviewing: `loss/4` returns a 0-dimensional `Nx.Tensor` struct, and comparing tensor structs with `<` uses Erlang term ordering rather than the numeric value, so the `cond` comparisons in `train/4` don’t do what you expect — convert with `Nx.to_number/1` before comparing.)