Here is my solution:
I was expecting a hard one, this being Friday and all. But it was pleasant to work with.
advent_of_code/lib/2023/day_09.ex at master · code-shoily/advent_of_code (github.com)
Just the core:
defmodule Day09 do
  # Repeatedly takes pairwise differences of `list`, collecting
  # `field` (last element by default) of every row along the way.
  # Returns the collected values with the deepest (all-zero) row first,
  # so a left fold over the result extrapolates the sequence.
  def reduce(list, field \\ &List.last/1) do
    reduce(list, [field.(list)], field)
  end

  defp reduce(row, acc, field) do
    case Enum.all?(row, &(&1 == 0)) do
      true ->
        acc

      false ->
        # Pairwise differences: element i+1 minus element i.
        step = Enum.zip_with(tl(row), row, &Kernel.-/2)
        reduce(step, [field.(step) | acc], field)
    end
  end
end
Part 1:
# Part 1: for each history line, Day09.reduce/1 yields the stacked
# last-elements (deepest row first); folding them with + produces the
# next extrapolated value. Summing over all lines gives the answer.
lines
|> Enum.map(fn line ->
Day09.reduce(line)
|> Enum.reduce(&+/2)
end)
|> Enum.sum()
Part 2:
# Part 2: collect first-elements instead, and fold with - so each level
# subtracts the running extrapolation, yielding the previous value.
lines
|> Enum.map(fn line ->
Day09.reduce(line, &List.first/1)
|> Enum.reduce(&-/2)
end)
|> Enum.sum()
A bit lengthy, but it is optimized so there is no separate Enum.all?(list, fn n -> n == 0 end) pass over each row
defmodule AdventOfCode.Y23.Day9 do
  alias AoC.Input, warn: false

  # Stream the puzzle input one trimmed line at a time.
  def read_file(file, _part) do
    Input.stream!(file, trim: true)
  end

  # Each input line becomes a list of integers.
  def parse_input(input, _part) do
    Enum.map(input, &list_of_ints/1)
  end

  defp list_of_ints(line) do
    line
    |> String.split(" ", trim: true)
    |> Enum.map(&String.to_integer/1)
  end

  # Part one: sum of the next extrapolated value of every history.
  def part_one(problem) do
    problem |> Enum.map(&next_num/1) |> Enum.sum()
  end

  # Part two: sum of the previous extrapolated value of every history.
  def part_two(problem) do
    problem |> Enum.map(&prev_num/1) |> Enum.sum()
  end

  # Next value = last element plus the extrapolated next difference.
  def next_num(list) do
    List.last(list) + next_delta(list)
  end

  defp next_delta(list) do
    case diffs(list) do
      {_, true} -> 0
      {step, _} -> List.last(step) + next_delta(step)
    end
  end

  # Previous value = first element minus the extrapolated previous difference.
  def prev_num([first | _] = list) do
    first - prev_delta(list)
  end

  defp prev_delta(list) do
    case diffs(list) do
      {_, true} -> 0
      {[step_first | _] = step, _} -> step_first - prev_delta(step)
    end
  end

  # Single pass that yields both the pairwise differences and whether
  # they are all zero, avoiding a separate Enum.all?/2 scan per row.
  defp diffs([head | rest]), do: diffs(rest, head, [], true)

  defp diffs([], _prev, acc, zero?), do: {Enum.reverse(acc), zero?}

  defp diffs([cur | rest], prev, acc, zero?) do
    delta = cur - prev
    diffs(rest, cur, [delta | acc], zero? and delta == 0)
  end
end
Sharing my failure on Part 1
I was trying polynomial regression, and it explodes
The regression function for working out the coefficients of the polynomial:
# Least-squares polynomial fit via the normal equations:
# coeffs = (X'X)^-1 X' y, with X a Vandermonde-style matrix.
# NOTE(review): fitting a degree-(n-1) polynomial through n points makes
# X square and badly conditioned, so the explicit inverse blows up
# numerically — the "explosion" described above.
# The param `y` is a list of numbers.
regression = fn y ->
# Reshape the observations into an {n, 1} f64 column vector.
y =
y
|> Nx.tensor(type: :f64)
|> Nx.new_axis(0)
|> Nx.transpose()
{n, 1} = Nx.shape(y)
# v = [0, 1, ..., n-1]: the sample positions along the line.
v =
0..(n - 1)
|> Enum.to_list()
|> Nx.tensor(type: :f64)
# X has one column per power p: X[i][p] = i^p.
x =
for p <- 0..(n-1) do
Nx.pow(v, p)
end
|> Nx.stack()
|> Nx.transpose()
xt = Nx.transpose(x)
# Normal equations: coeffs = (X'X)^-1 X' y.
coeffs =
xt
|> Nx.dot(x)
|> Nx.LinAlg.invert()
|> Nx.dot(xt)
|> Nx.dot(y)
# Residuals of the fit, returned so prediction can compensate for them.
errors = Nx.subtract(y, Nx.dot(x, coeffs))
{coeffs, errors}
end
And the function for predicting the last value of a line:
# Evaluates the fitted polynomial at position `x`.
# x is the index of the yet-to-be-solved number in a line.
predict = fn x, {coeffs, errors} ->
{n, 1} = Nx.shape(coeffs)
# Row vector of powers [x^0, x^1, ..., x^(n-1)].
x =
0..(n - 1)
|> Enum.map(& x ** &1)
|> Nx.tensor()
|> Nx.new_axis(0)
# Dot with the coefficients, add the residuals (broadcast) and average —
# presumably to smear the fit error back in; round because the puzzle
# answers are integers. TODO(review): confirm the residual averaging.
x
|> Nx.dot(coeffs)
|> Nx.add(errors)
|> Nx.mean()
|> Nx.to_number()
|> round()
end
Not sure if unfold was the best choice for this use case but it ended up quite clean.
# Solves one star: `first?` selects backwards (true) vs forwards (false)
# extrapolation. Stream.unfold/2 emits one boundary element per
# difference row, stopping once a row is all zeros.
solve = fn first? ->
File.stream!("input.txt")
|> Stream.map(fn ln ->
String.split(ln)
|> Enum.map(&String.to_integer/1)
|> Stream.unfold(fn l ->
# Emit the first (part 2) or last (part 1) element of the current
# row, and continue with its pairwise differences.
case Enum.all?(l, & &1 == 0) do
true -> nil
false -> {Enum.at(l, first? && 0 || -1), Enum.zip_with(tl(l), l, &-/2)}
end
end)
|> then(fn ends ->
case first? do
# Backwards: reverse so the fold computes f0 - (f1 - (f2 - ...)).
true -> ends |> Enum.reverse() |> Enum.reduce(&-/2)
# Forwards: the next value is just the sum of the last elements.
false -> ends |> Enum.sum()
end
end)
end)
|> Enum.sum()
end
IO.inspect(solve.(false), label: "Star 1")
IO.inspect(solve.(true), label: "Star 2")
yeah this day was very easy
In p1 I accumulated only the last element of each sub-line before predicting the history, as I anticipated there would be memory issues in p2 if I were accumulating all the sub-lines
…but p2 was as easy as p1
At first I used reduce_while/3
to solve it, but then switched to Stream.unfold/2
inspired by @lbm364dl:
Did not know there was an AoC thread here! This is my approach:
Busted out the recursive functions today:
Part 1
defmodule Part1 do
  # Build the stack of difference rows (original history on top), then
  # hand the stack to `fun` with a starting extrapolation of 0.
  def difference(history, fun), do: difference(history, [], fun)

  def difference(prev_diffs, rest_diffs, fun) do
    case Enum.all?(prev_diffs, &(&1 == 0)) do
      true ->
        fun.(rest_diffs, 0)

      false ->
        # Pairwise differences: element i+1 minus element i.
        step = Enum.zip_with(tl(prev_diffs), prev_diffs, &Kernel.-/2)
        difference(step, [prev_diffs | rest_diffs], fun)
    end
  end

  # Fold forward through the stack: add each row's last element,
  # innermost (deepest) row first.
  def predictr([], extrapolation), do: extrapolation

  def predictr([diffs | rest], extrapolation),
    do: predictr(rest, extrapolation + List.last(diffs))
end
# Part 1 answer: forward-extrapolate every history and total the results.
histories
|> Enum.map(fn history -> Part1.difference(history, &Part1.predictr/2) end)
|> Enum.sum()
Part 2
defmodule Part2 do
  # Fold backward through the stacked rows: each row contributes
  # `first - acc`, innermost (deepest) row first. An empty stack
  # simply returns the starting extrapolation.
  def predictl(rows, extrapolation) do
    Enum.reduce(rows, extrapolation, fn [first | _], acc -> first - acc end)
  end
end
# Part 2 answer: backward-extrapolate every history (reusing Part1's
# difference stack, folding with predictl) and total the results.
histories
|> Enum.map(fn history -> Part1.difference(history, &Part2.predictl/2) end)
|> Enum.sum()
There’s a private leaderboard you can join using the code 3241528-139712b3
.
My solution works with the test but the actual input sequences aren’t becoming all zeros at the end, there’s a single non-zero value that I’m unable to get rid of.
For example:
12 20 25 35 64 137 305 670 1420 2874 5537 10165 17840 30055 48809 76712 117100 174160 253065 360119 502912
becomes
[4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
Maybe I’m misunderstanding the challenge.
defmodule Day09 do
  # Part 1: sum of the next extrapolated value of every input sequence.
  def part1(input) do
    parse(input)
    |> Enum.map(&prediction/1)
    |> Enum.sum
  end

  # Next value of `sequence`: difference the sequence until it is all
  # zeros, then add up the last elements collected along the way.
  def prediction(sequence, last_values \\ []) do
    last = List.last(sequence)

    cond do
      [0] == Enum.uniq(sequence) -> Enum.sum(last_values)
      # Prepend instead of appending: Enum.sum/1 is order-independent
      # and `[last | acc]` is O(1) where `acc ++ [last]` is O(n).
      true -> prediction(next_sequence(sequence), [last | last_values])
    end
  end

  def next_sequence(sequence, new_sequence \\ [])
  def next_sequence([_last], new_sequence), do: new_sequence

  def next_sequence([head | tail], new_sequence) do
    # BUG FIX: the difference must keep its sign (next - current).
    # Taking abs/1 here left a stray non-zero residue for sequences
    # that are not monotonically increasing.
    new_value = List.first(tail) - head
    next_sequence(tail, new_sequence ++ [new_value])
  end

  # Split the raw report into lines of integers.
  def parse(raw_report) do
    raw_report
    |> String.split("\n")
    |> Enum.map(fn line -> line
      |> String.split
      |> Enum.map(&String.to_integer/1)
    end)
  end
end
Pretty much the same as everyone else’s. Passing functions and tail recursion makes this really clean. https://github.com/shritesh/advent/blob/main/2023/09.livemd
Converted part 1 to a simple visualization using Kino + VegaLite:
Code
alias VegaLite, as: Vl
defmodule Viz do
# Animated part 1: pushes every difference row to a Kino.VegaLite
# widget while computing the forward extrapolation.
def difference(widget, history), do: difference(widget, history, [])
def difference(widget, prev_diffs, rest_diffs) do
# `i` is the depth of the current row; it keys the line color.
i = length(rest_diffs)
data =
for {diff, x} <- Enum.with_index(prev_diffs) do
%{"x" => x, "y" => diff, "i" => i}
end
Kino.VegaLite.push_many(widget, data)
# Shorter pauses at deeper rows so the animation accelerates.
Process.sleep(div(500, i + 1))
if Enum.all?(prev_diffs, &(&1 == 0)) do
predictr(widget, [prev_diffs | rest_diffs])
else
next_diffs =
prev_diffs
|> Enum.chunk_every(2, 1, :discard)
|> Enum.map(fn xs -> Enum.reduce(xs, &Kernel.-/2) end)
difference(widget, next_diffs, [prev_diffs | rest_diffs])
end
end
# Walk back up the stacked rows, plotting each partial extrapolation
# one position past the end of its row.
def predictr(widget, diffs), do: predictr(widget, diffs, 0)
def predictr(_, [], extrapolation), do: extrapolation
def predictr(widget, [diffs | rest], extrapolation) do
next_extrapolation = extrapolation + List.last(diffs)
i = length(rest)
Kino.VegaLite.push(widget, %{
"x" => length(diffs),
"y" => next_extrapolation,
"i" => i
})
Process.sleep(div(500, i + 1))
predictr(widget, rest, next_extrapolation)
end
end
# Line chart: x/y quantitative, one colored line per difference depth "i".
widget =
Vl.new(width: 600, height: 300)
|> Vl.mark(:line)
|> Vl.encode_field(:x, "x", type: :quantitative)
|> Vl.encode_field(:y, "y", type: :quantitative)
|> Vl.encode_field(:color, "i", type: :nominal)
|> Kino.VegaLite.render()
# Replay the animation for every history, clearing between runs.
for history <- histories do
Kino.VegaLite.clear(widget)
Viz.difference(widget, history)
end
These are the intermediate lists I found with my working solution:
12 20 25 35 64 137 305 670 1420 2874 5537 10165 17840 30055 48809 76712 117100 174160 253065 360119 502912
8 5 10 29 73 168 365 750 1454 2663 4628 7675 12215 18754 27903 40388 57060 78905 107054 142793
-3 5 19 44 95 197 385 704 1209 1965 3047 4540 6539 9149 12485 16672 21845 28149 35739
8 14 25 51 102 188 319 505 756 1082 1493 1999 2610 3336 4187 5173 6304 7590
6 11 26 51 86 131 186 251 326 411 506 611 726 851 986 1131 1286
5 15 25 35 45 55 65 75 85 95 105 115 125 135 145 155
10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
0 0 0 0 0 0 0 0 0 0 0 0 0 0
Thank you! Turns out, I was wrong to take absolute value of the difference like so:
new_value = abs(head - List.first(tail))
Instead, I had to simply subtract the second value from the first
new_value = List.first(tail) - head
With this change, I got the right answer immediately.
I thought the first few days had some excessively hard problems for early on but this was an oddly easy day. Part 2 was just not much of a challenge after part 1.
Code
defmodule Day9 do
  @moduledoc """
  Day9 AoC Solutions
  """
  alias AocToolbox.Input

  # add sample data here
  def input(:test),
    do: """
    0 3 6 9 12 15
    1 3 6 10 15 21
    10 13 16 21 30 45
    """

  def input(:real), do: Input.load(__DIR__ <> "/input.txt")

  def solve(1, mode), do: __MODULE__.Part1.solve(input(mode))
  def solve(2, mode), do: __MODULE__.Part2.solve(input(mode))

  # Parse the raw puzzle text into rows of integers.
  def parse(input) do
    for line <- String.split(input, "\n", trim: true) do
      line
      |> String.split(~r"\s")
      |> Enum.map(&String.to_integer/1)
    end
  end

  # For every history, build the full stack of difference rows with the
  # all-zero row first and the original history last.
  def intervals(histories), do: Enum.map(histories, fn row -> do_intervals(row, []) end)

  def do_intervals(row, acc) do
    case Enum.all?(row, &Kernel.==(&1, 0)) do
      true ->
        [row | acc]

      false ->
        row
        |> Enum.chunk_every(2, 1, :discard)
        |> Enum.map(fn [a, b] -> b - a end)
        |> do_intervals([row | acc])
    end
  end

  def apply_intervals(intervals, part \\ 1)

  # Part 1: the next value of a history is the sum of the last elements
  # of its difference rows.
  def apply_intervals(intervals, 1) do
    Enum.map(intervals, fn stack ->
      stack
      |> Enum.map(fn row -> row |> Enum.reverse() |> hd() end)
      |> Enum.sum()
    end)
  end

  # Part 2: fold the first elements (zero row first) with
  # `acc' = first - acc` to extrapolate backwards.
  def apply_intervals(intervals, 2) do
    Enum.map(intervals, fn stack ->
      stack
      |> Enum.map(&hd/1)
      |> Enum.reduce(fn first, acc -> first - acc end)
    end)
  end

  defmodule Part1 do
    def solve(input) do
      input
      |> Day9.parse()
      |> Day9.intervals()
      |> Day9.apply_intervals()
      |> Enum.sum()
    end
  end

  defmodule Part2 do
    def solve(input) do
      input
      |> Day9.parse()
      |> Day9.intervals()
      |> Day9.apply_intervals(2)
      |> Enum.sum()
    end
  end
end
Built an extrapolate function with map
and unfold
. Could reuse it for part 2 with minimal adjustments:
I just can’t get rid of the impulse to solve this puzzle using Nx and linear algebra.
Here’s my new code with a lot of description about the math.
Please download it to your livebook to see the content because GitHub does not render the LaTeX parts correctly.
By the way, the code is fully vectorized so there’s no explicit for-loop or Enum
or Stream
function calls in solving the equation, except for parsing the input and building the tensors.
Found a bug in Nx.BinaryBackend
.
When running Nx.LinAlg.solve(mat_x, mat_y)
in a livebook when mat_y
has more columns than rows, this error was raised:
#Inspect.Error<
got ArgumentError with message:
"""
errors were found at the given arguments:
* 3rd argument: out of range
"""
while inspecting:
%{
data: %Nx.BinaryBackend{
state: <<4, 0, 0, 0, 0, 0, 8, 192, 46, 33, 9, 20, 142, 152, 243, 188, 179, 255, 255, 255, 255,
255, 19, 64, 4, 0, 0, 0, 0, 0, 8, 192, 46, 33, 9, 20, 142, 152, 243, 188, 179, 255, 255, 255,
255, 255, 19, 64, ...>>
},
type: {:f, 64},
names: [nil, nil],
__struct__: Nx.Tensor,
vectorized_axes: [],
shape: {6, 7}
}
Stacktrace:
:erlang.binary_part(<<4, 0, 0, 0, 0, 0, 8, 192, 46, 33, 9, 20, 142, 152, 243, 188, 179, 255, 255, 255, 255, 255, 19, 64, 4, 0, 0, 0, 0, 0, 8, 192, 46, 33, 9, 20, 142, 152, 243, 188, 179, 255, 255, 255, 255, 255, 19, 64, 28, 0, ...>>, 0, 336)
(nx 0.6.4) lib/nx/binary_backend.ex:179: Nx.BinaryBackend.to_binary/2
(nx 0.6.4) lib/nx/binary_backend.ex:1028: Nx.BinaryBackend.inspect/2
(nx 0.6.4) lib/nx/tensor.ex:241: Inspect.Nx.Tensor.inspect/2
(elixir 1.15.7) lib/inspect/algebra.ex:348: Inspect.Algebra.to_doc/2
(elixir 1.15.7) lib/kernel.ex:2366: Kernel.inspect/2
(kino 0.12.0) lib/kino/output.ex:21: Kino.Output.inspect/2
lib/livebook/runtime/evaluator/formatter.ex:62: Livebook.Runtime.Evaluator.Formatter.to_output/1
>
There’s no error when using EXLA.Backend
.
Still digging the source code of Nx.BinaryBackend
trying to spot the cause of this error.