Advent of Code 2024 - Day 1

Nice easy start to the yearly tradition!

# part 1
File.stream!("input.txt")
|> Enum.reduce({[], []}, fn line, {lst1, lst2} ->
  String.split(line)
  |> Enum.map(&String.to_integer/1)
  |> then(fn [n1, n2] -> {[n1 | lst1], [n2 | lst2]} end)
end)
|> then(fn {lst1, lst2} -> Stream.zip_with(Enum.sort(lst1), Enum.sort(lst2), fn a, b -> abs(a-b) end) end)
|> Enum.sum()
|> IO.inspect(label: "part 1")

# part 2
File.stream!("input.txt")
|> Enum.reduce({[], %{}}, fn line, {lst, map} ->
  String.split(line)
  |> Enum.map(&String.to_integer/1)
  |> then(fn [n1, n2] -> {[n1 | lst], Map.update(map, n2, 1, fn n -> n+1 end)} end)
end)
|> then(fn {lst, map} -> Enum.reduce(lst, 0, fn n, acc -> acc + n * Map.get(map, n, 0) end) end)
|> IO.inspect(label: "part 2")

I liked this solution for part 1 because of the unzip / process / zip pattern:

File.stream!("example.txt")
|> Stream.map(&String.trim/1)
|> Stream.map(&String.split/1)
|> Stream.map(fn [a, b] -> {String.to_integer(a), String.to_integer(b)} end)
|> Enum.unzip()
|> then(fn {l, r} -> [Enum.sort(l), Enum.sort(r)] end)
|> Stream.zip()
|> Stream.map(fn {a, b} -> abs(b-a) end)
|> Enum.sum()
|> IO.inspect()

Part 2 wasn't quite as symmetrical:

{left, right} =
  File.stream!("input.txt")
  |> Stream.map(&String.trim/1)
  |> Stream.map(&String.split/1)
  |> Stream.map(fn [a, b] -> {String.to_integer(a), String.to_integer(b)} end)
  |> Enum.unzip()

f_right = Enum.frequencies(right)

left
|> Stream.map(fn l -> {l, Map.get(f_right, l, 0)} end)
|> Stream.map(fn {l, n} -> l * n end)
|> Enum.sum()
|> IO.inspect()

Technically it would be faster to fuse successive Stream.maps into a single one with a more-complicated function, but writing in this style keeps each line focused on a specific transformation.
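
For illustration, a sketch of what that fusion would look like on the pipeline above (probably not worth the readability trade-off):

# three chained Stream.map calls: each element flows through three separate closures
File.stream!("example.txt")
|> Stream.map(&String.trim/1)
|> Stream.map(&String.split/1)
|> Stream.map(fn [a, b] -> {String.to_integer(a), String.to_integer(b)} end)

# fused into one Stream.map: one closure per element, but more happening per line
File.stream!("example.txt")
|> Stream.map(fn line ->
  [a, b] = line |> String.trim() |> String.split()
  {String.to_integer(a), String.to_integer(b)}
end)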


Our solutions are quite similar :wink:

Personally I like to write unit tests and save the input as a file in the test folder.
Pretty useful when you want to do a bit of refactoring while doing part 2, to ensure you don't break part 1.

defmodule D1 do
  def p1(file) do
    file
    |> build_lists()
    |> Enum.map(&Enum.sort/1)
    |> Enum.zip_reduce(0, fn [a, b], acc -> acc + abs(b - a) end)
  end

  def p2(file) do
    [l1, l2] = build_lists(file)
    freq = Enum.frequencies(l2)
    Enum.reduce(l1, 0, fn a, acc -> acc + a * Map.get(freq, a, 0) end)
  end

  defp build_lists(file) do
    file
    |> File.stream!(:line)
    |> Enum.reduce([[], []], fn line, [l1, l2] ->
      [a, b] =
        line
        |> String.split()
        |> Enum.map(&String.to_integer/1)

      [[a | l1], [b | l2]]
    end)
  end
end

unit tests:

defmodule D1Test do
  use ExUnit.Case

  test "p1" do
    assert D1.p1("test/d1.txt") == 2_970_687
  end

  test "p2" do
    assert D1.p2("test/d1.txt") == 23_963_899
  end
end

When things get harder, it can also be useful to write a unit test against the given examples.
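
For instance, a minimal sketch, assuming the puzzle's six-line example is saved as test/d1_example.txt (the puzzle quotes 11 and 31 as the example answers):

defmodule D1ExampleTest do
  use ExUnit.Case

  test "p1 matches the published example" do
    assert D1.p1("test/d1_example.txt") == 11
  end

  test "p2 matches the published example" do
    assert D1.p2("test/d1_example.txt") == 31
  end
end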

Someone's getting into the typing mindset before the big release, it seems :grin:

I wish. I am currently jobless. :003:

I was referring to always being thorough, though.

That being said, I am ashamed to have forgotten about Enum.zip_with and Enum.zip_reduce.
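
For anyone else who forgot them, a quick sketch of both:

# zip_with/3 pairs up two lists and maps over the pairs
Enum.zip_with([1, 2, 3], [4, 5, 6], fn a, b -> abs(a - b) end)
#=> [3, 3, 3]

# zip_reduce/4 pairs them up and folds in a single pass
Enum.zip_reduce([1, 2, 3], [4, 5, 6], 0, fn a, b, acc -> acc + abs(a - b) end)
#=> 9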

I was looking for a way to read a file from the web, so this is very handy. The left/right lists going into a tuple is also something I haven't worked out in Elixir yet, so again useful.

For Part 1 you could do

|> Enum.reduce(0, fn {a, b}, acc -> acc + abs(a - b) end)

and remove the sum step, I believe.
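
Something like this, if I'm reading the earlier pipeline right (just a sketch):

File.stream!("example.txt")
|> Stream.map(&String.trim/1)
|> Stream.map(&String.split/1)
|> Stream.map(fn [a, b] -> {String.to_integer(a), String.to_integer(b)} end)
|> Enum.unzip()
|> then(fn {l, r} -> [Enum.sort(l), Enum.sort(r)] end)
|> Stream.zip()
|> Enum.reduce(0, fn {a, b}, acc -> acc + abs(a - b) end)
|> IO.inspect()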

Context: I'm new to Elixir and not a massive user of FP in general outside of LINQ in C#, but I'm very interested in the Elixir ecosystem.

What I love about this challenge is that it shows how many ways there are to solve the same problem, in the same language. I'm hoping to use this AoC to find some idiomatic Elixir (and functional) ways to do things I just haven't thought of.

I've solved Day 1 using Livebook, so no fancy modules or anything, but already I'm noticing some patterns. For instance, I see a lot of |> Enum.map |> Enum.sum being used where a |> Enum.reduce would do the step in a single line. Although both are correct, I'm wondering what the mental leap is to merge those map/sum steps into a reduce. It wasn't until a colleague pointed it out that I realised it too; I solved the problem and didn't go back to use reduce until afterwards. I'm curious as to when reduce becomes 'normal' thinking in FP.

My pre-'reduce' solution is here [aoc-2024/day1.livemd at main · davewil/aoc-2024 · GitHub]; very happy for constructive feedback.

My post-'reduce' solution generally merges the final map/sum into a reduce that accumulates from a 0 initial value.
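
For what it's worth, the two shapes side by side (just a sketch, with a hypothetical pairs list of {left, right} tuples):

# map then sum: each step reads clearly on its own line
pairs
|> Enum.map(fn {a, b} -> abs(a - b) end)
|> Enum.sum()

# single reduce: same result, accumulator starts at 0
pairs
|> Enum.reduce(0, fn {a, b}, acc -> acc + abs(a - b) end)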


Signed up just to say thank you for this comment! I didn't know about the then function. I've subsequently learned about the tap function as well, thanks to this article: Useful Elixir Functions You May Not Know: tap() & then()

I guess it's not that different from |> (fn x -> x end).(), but it at least feels a bit nicer.
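
A tiny sketch of how I understand the pair (both were added in Elixir 1.12):

[{3, 4}, {1, 2}]
|> then(fn pairs -> Enum.map(pairs, fn {a, b} -> abs(a - b) end) end)  # then/2 returns the function's result
|> tap(&IO.inspect/1)                                                  # tap/2 runs the function for side effects, returns the value unchanged
|> Enum.sum()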


input
|> Enum.map()
|> Enum.sum()

can usually be replaced with a
input
|> Enum.reduce()

if you want to

By the way, you can also achieve a similar memory footprint with a stream:

input
|> Stream.map(...)
|> Enum.sum()

Hi everyone!

This is my first year doing Advent of Code. Here's my solution for Day 1:

defmodule AdventOfCode.DayOne do
  def part_one(input) do
    input
    |> parse_input()
    |> get_distances()
    |> Enum.sum()
  end

  def part_two(input) do
    input
    |> parse_input()
    |> get_similarity_scores()
    |> Enum.sum()
  end

  defp parse_input(input) do
    input
    |> String.split("\n")
    |> Enum.map(fn line ->
      line
      |> String.trim()
      |> String.split()
      |> Enum.map(&String.to_integer/1)
      |> List.to_tuple()
    end)
    |> Enum.unzip()
    |> Tuple.to_list()
  end

  defp get_distances(input) do
    input
    |> Enum.map(&Enum.sort/1)
    |> Enum.zip_with(fn [l, r] -> abs(l - r) end)
  end

  defp get_similarity_scores(input) do
    [left_column, right_column] = input

    Enum.map(left_column, fn n ->
      n * Enum.count(right_column, &(&1 === n))
    end)
  end
end

Benchmarks or it's not true :stuck_out_tongue:

Okay, here is the benchmark result on my laptop.

Code:

ls = 1..1000 |> Enum.map(&:rand.uniform(&1))

Benchee.run(%{
  reduce: fn ->
    Enum.reduce(ls, 0, & &1 * &1 + &2)
  end,

  stream: fn ->
    ls |> Stream.map(& &1 * &1) |> Enum.sum()
  end
}, memory_time: 2)

Result:

Operating System: Linux
CPU Information: 13th Gen Intel(R) Core(TM) i7-13620H
Number of Available Cores: 16
Available memory: 15.31 GB
Elixir 1.17.3
Erlang 27.1.2
JIT enabled: true

Benchmark suite executing with the following configuration:
warmup: 2 s
time: 5 s
memory time: 2 s
reduction time: 0 ns
parallel: 1
inputs: none specified
Estimated total run time: 18 s

Benchmarking stream ...
Benchmarking reduce ...
Calculating statistics...
Formatting results...

Name             ips        average  deviation         median         99th %
stream        4.78 K      209.18 μs     ±6.75%      208.48 μs      220.47 μs
reduce        2.62 K      381.22 μs     ±3.49%      379.53 μs      399.30 μs

Comparison: 
stream        4.78 K
reduce        2.62 K - 1.82x slower +172.04 μs

Memory usage statistics:

Name      Memory usage
stream       509.39 KB
reduce       907.22 KB - 1.78x memory usage +397.83 KB

**All measurements for memory usage were the same**

But I guess it's not enough to show the extra memory consumption.


My goal here was to see how ChatGPT could assist me in solving it.

I know you're talking about memory, but on an M1 running the same code, Stream is 3x slower:

Name             ips        average  deviation         median         99th %
reduce      301.47 K        3.32 μs   ±212.38%        3.29 μs        3.63 μs
stream       95.30 K       10.49 μs   ±409.82%        9.21 μs       14.54 μs

Comparison:
reduce      301.47 K
stream       95.30 K - 3.16x slower +7.18 μs

Memory usage statistics:

Name      Memory usage
reduce            0 KB
stream        62.80 KB - ∞ x memory usage +62.80 KB

**All measurements for memory usage were the same**

Operating System: macOS
CPU Information: Apple M1 Pro
Number of Available Cores: 10
Available memory: 16 GB
Elixir 1.17.2
Erlang 27.0.1

Played code golf on my first attempt.

Part 1:

sorted = file |> file_to_lists_of_ints |> Enum.zip_with(&Enum.sort/1)
Enum.zip_reduce(sorted, 0, fn [a, b], sum -> sum + abs(a - b) end)

Part 2:

[left, right] = file |> file_to_lists_of_ints |> Enum.zip_with(&Enum.frequencies/1)
Enum.reduce(left, 0, fn {k, v}, sum -> sum + k * v * Map.get(right, k, 0) end)

Boilerplate: file_to_lists_of_ints does (the equivalent of):

file
|> File.read!()
|> String.trim()
|> String.split("\n")
|> Enum.map(fn line -> line |> String.split() |> Enum.map(&String.to_integer/1) end)
2 Likes
defmodule Day1 do
  def run(test_data) do
    lists = lists_from(test_data)
    IO.puts("Difference Score: #{sum_of_differences(lists)}")
    IO.puts("Similarity Score: #{sum_of_similarities(lists)}")
  end

  def lists_from(file) do
    file
    |> File.stream!()
    |> Stream.map(&String.split/1)
    |> Stream.map(fn [a, b] -> {String.to_integer(a), String.to_integer(b)} end)
    |> Enum.unzip()
  end

  def sum_of_differences({list1, list2}) do
    Stream.zip(Enum.sort(list1), Enum.sort(list2))
    |> Stream.map(fn {x, y} -> abs(x - y) end)
    |> Enum.sum()
  end

  def sum_of_similarities({list1, list2}) do
    counts = Enum.frequencies(list2)

    list1
    |> Enum.map(fn num -> num * Map.get(counts, num, 0) end)
    |> Enum.sum()
  end
end

Day1.run("./testdata.txt")

Late to posting this, but here are my solutions for yesterday.

Part 1:

File.stream!("01/input.txt")
|> Enum.reduce(
  %{left: [], right: []},
  fn line, state ->
    {num1, rest} = Integer.parse(line)
    {num2, _rest} = rest |> String.trim() |> Integer.parse()
    %{state | left: [num1 | state.left], right: [num2 | state.right]}
  end
)
|> Map.values()
|> Enum.map(&Enum.sort/1)
|> Enum.zip_reduce(
  0,
  fn [l, r], acc ->
    acc + abs(r - l)
  end
)
|> IO.puts()

Part 2:

File.stream!("01/input.txt")
|> Enum.reduce(
  %{left: [], right: []},
  fn line, state ->
    {num1, rest} = Integer.parse(line)
    {num2, _rest} = rest |> String.trim() |> Integer.parse()
    %{state | left: [num1 | state.left], right: [num2 | state.right]}
  end
)
|> (fn %{left: left, right: right} ->
      left
      |> Enum.reduce(
        0,
        fn l, acc ->
          acc + l * Enum.count(right, fn i -> i == l end)
        end
      )
    end).()
|> IO.puts()

Here's mine (the Elixir version)

defmodule AdventOfCode.Y2024.Day01 do
  alias AdventOfCode.Helpers.{InputReader, Transformers}

  def input, do: InputReader.read_from_file(2024, 1)

  def run(input \\ input()) do
    input = parse(input)

    {run_1(input), run_2(input)}
  end

  defp run_1({left_list, right_list, _}) do
    for {left, right} <- Enum.zip(left_list, right_list), reduce: 0 do
      acc -> acc + abs(left - right)
    end
  end

  defp run_2({left_list, _, tally}) do
    for id_value <- left_list, reduce: 0 do
      acc -> acc + id_value * Map.get(tally, id_value, 0)
    end
  end

  def parse(data \\ input()) do
    {left, right} =
      for line <- Transformers.lines(data), reduce: {[], []} do
        {left_list, right_list} ->
          [left, right] = String.split(line)
          {[String.to_integer(left) | left_list], [String.to_integer(right) | right_list]}
      end

    {Enum.sort(left), Enum.sort(right), Enum.frequencies(right)}
  end
end

I was thinking about building two min-heaps and a tally during parsing, but gave up on the idea; I was too tired.
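
For the record, a rough sketch of that idea (using :gb_sets as a stand-in for a min-heap, keyed by {value, line_number} so duplicate values survive, plus a frequency map for part 2):

defmodule Day01Heaps do
  # One pass over the file: two ordered sets plus a tally of the right column.
  def parse(file) do
    file
    |> File.stream!()
    |> Stream.map(&String.split/1)
    |> Stream.with_index()
    |> Enum.reduce({:gb_sets.new(), :gb_sets.new(), %{}}, fn {[a, b], i}, {ls, rs, tally} ->
      {l, r} = {String.to_integer(a), String.to_integer(b)}
      {:gb_sets.add({l, i}, ls), :gb_sets.add({r, i}, rs), Map.update(tally, r, 1, &(&1 + 1))}
    end)
  end

  # Part 1: repeatedly pop the smallest element from both sets.
  def part1({ls, rs, _tally}), do: do_part1(ls, rs, 0)

  defp do_part1(ls, rs, acc) do
    if :gb_sets.is_empty(ls) do
      acc
    else
      {{l, _}, ls} = :gb_sets.take_smallest(ls)
      {{r, _}, rs} = :gb_sets.take_smallest(rs)
      do_part1(ls, rs, acc + abs(l - r))
    end
  end

  # Part 2: only the left values and the tally are needed.
  def part2({ls, _rs, tally}) do
    ls
    |> :gb_sets.to_list()
    |> Enum.reduce(0, fn {l, _}, acc -> acc + l * Map.get(tally, l, 0) end)
  end
end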