Advent of Code 2024 - Day 5

aha, lol … it's still not valid
confirmed with a
|> Kernel.tap(fn u -> is_valid_update(u, rules) |> IO.inspect() end)

:man_facepalming:

  # Single pass: for each page, look up every rule {page, b}; if b currently
  # appears *before* page in the accumulator, swap the two.
  defp fix_invalid_update(update, rules) do
    new_update =
      update
      |> Enum.reduce(update, fn next, acc ->
        rules
        |> Enum.filter(&(&1 |> Tuple.to_list() |> hd() == next))
        |> Enum.reduce(acc, fn {_, b}, acc_ ->
          b_index = acc_ |> Enum.find_index(&(&1 == b))

          if b_index == nil do
            acc_
          else
            new_next_id = acc_ |> Enum.find_index(&(&1 == next))

            if b_index < new_next_id do
              acc_ |> List.replace_at(new_next_id, b) |> List.replace_at(b_index, next)
            else
              acc_
            end
          end
        end)
      end)

    # One pass can introduce new violations, so recurse until the update is valid.
    if is_valid_update(new_update, rules),
      do: new_update,
      else: fix_invalid_update(new_update, rules)
  end

need multiple recursive passes… DAY5 PASSED! <3

god, you were right … and I didn't understand your comment at first … I came to the conclusion myself

the interesting question is WHY, and how performant IS it? for each update it potentially needs multiple passes …
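counting the passes is easy to bolt on … a minimal sketch, assuming the single-pass reduce above is pulled out into a (hypothetical) one_pass/2 helper:

  # one_pass/2 stands in for the Enum.reduce body of fix_invalid_update above;
  # is_valid_update/2 is the existing validity check.
  defp fix_invalid_update(update, rules, passes \\ 1) do
    new_update = one_pass(update, rules)

    if is_valid_update(new_update, rules) do
      IO.inspect("Used #{passes} passes to fix update")
      new_update
    else
      fix_invalid_update(new_update, rules, passes + 1)
    end
  end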

answer:

"Used 4 passes to fix update"
"Used 5 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 4 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 5 passes to fix update"
"Used 3 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 3 passes to fix update"
"Used 2 passes to fix update"
"Used 3 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 6 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 4 passes to fix update"
"Used 3 passes to fix update"
"Used 4 passes to fix update"
"Used 6 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 1 passes to fix update"
"Used 5 passes to fix update"
"Used 3 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 3 passes to fix update"
"Used 4 passes to fix update"
"Used 4 passes to fix update"
"Used 3 passes to fix update"
"Used 6 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 4 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 3 passes to fix update"
"Used 4 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 5 passes to fix update"
"Used 2 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 6 passes to fix update"
"Used 5 passes to fix update"
"Used 2 passes to fix update"
"Used 6 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 5 passes to fix update"
"Used 3 passes to fix update"
"Used 3 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 3 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 4 passes to fix update"
"Used 4 passes to fix update"
"Used 2 passes to fix update"
"Used 5 passes to fix update"
"Used 4 passes to fix update"
"Used 4 passes to fix update"
"Used 5 passes to fix update"
"Used 6 passes to fix update"
"Used 4 passes to fix update"

not many … but if there were 1 million incorrect updates, that would mean 1 million × worst case 6 = 6 million passes of the fix function.

It's O(n) though … says Perplexity … since the pass count doesn't grow with input size: some updates may need few passes, some many, but one update doesn't affect the next.

I also got mine down to 20 ms using async_stream-based parallelism, but I think an i7 won't perform as well as an M1, so anyhow, fairly happy.
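For reference, a minimal sketch of that kind of parallelism (names hypothetical; fix_invalid_update/2 stands in for whatever per-update fix function you have):

invalid_updates
|> Task.async_stream(fn update -> fix_invalid_update(update, rules) end, ordered: false)
|> Enum.map(fn {:ok, fixed} -> Enum.at(fixed, div(length(fixed), 2)) end)
|> Enum.sum()

ordered: false lets results arrive as they finish, which is fine here since only the sum of the middles matters.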

Hey, can someone help me? This works for the test cases but not the big data set; it's like there are circular references. (The input file f is split at the blank line into rules and pages.)

rules = f[0].splitlines()
pages = f[1].splitlines()

ruleListA = []
ruleListB = []

for i in rules:
    ruleListA.append(i.split("|")[0])
    ruleListB.append(i.split("|")[1])

print(ruleListA)
print(ruleListB)
print(pages)

count = 0

incorrectLines = []

def test(line):
    pageList = []
    active = True
    for number in pages[line].split(","):
        pageList.append(number)
    valid = True
    for ruleNum in range(0, len(ruleListA)):
        if ruleListA[ruleNum] in pageList and ruleListB[ruleNum] in pageList:
            if pageList.index(ruleListA[ruleNum]) > pageList.index(ruleListB[ruleNum]):
                valid = False
                incorrectLines.append(pages[line])
                return -1
    if valid == True:
        return pageList[len(pageList) // 2]

for i in range(0, len(pages)):
    if test(i) != -1:
        count += int(test(i))
print(count)
print(incorrectLines)

correctOrder = []

listLock = False

print("incorrect")
print(incorrectLines)

graph = {}

print(ruleListB)

for i in range(0, len(ruleListA)):
    graph[ruleListA[i]] = []
    graph[ruleListB[i]] = []
for i in range(0, len(ruleListB)):
    graph[ruleListA[i]].append(ruleListB[i])

print(graph)
orderList = []
while graph:
  for i in graph:
      if graph[i] == []:
          num = i
          orderList.append(i)
  for i in graph:
      for j in graph[i]:
          if graph[i][graph[i].index(j)] == num:
              graph[i].remove(num)
  del graph[num]

global count2
count2 = 0

def check(order, line):
   orderToPrint = [x for x in order if x in line]
   return orderToPrint[len(orderToPrint) // 2]

for i in incorrectLines:
    count2 += int(check(orderList, i))
    
print(count2)

I can’t figure this out

@ESOrSomething FWIW, this forum is targeted at Elixir (and other BEAM languages) so you may want to find a Python-specific place to ask questions about Python.

Oh, sorry! Thanks for letting me know. I will try to find another place to post this!

This was easier than it seemed at first. Part one I just did a naive thang.

For part 2 I realised I could just implement a sorting function and use Enum.sort_by to fix it :sweat_smile:

Part 1

  def day_5_1() do
    input = File.read!("./day_5_input.txt")
    {rules, reports} = rules_reports(input, [])

    rules = numbers_in_rules(rules)

    Enum.reduce(reports, 0, fn report, mid_number_sum ->
      report_good? =
        Enum.all?(report, fn number ->
          {must_come_before, must_come_after} = Map.fetch!(rules, number)
          {comes_after, comes_before} = numbers_before(number, report)

          (comes_after == [] || not Enum.any?(comes_after, &(&1 in must_come_before))) &&
            (comes_before == [] || not Enum.all?(comes_before, &(&1 in must_come_after)))
        end)

      if report_good? do
        mid = Enum.at(report, div(length(report), 2))
        mid_number_sum + mid
      else
        mid_number_sum
      end
    end)
  end

  defp comes_before(number, rules), do: for({^number, rule} <- rules, do: rule)
  defp comes_after(number, rules), do: for({rule, ^number} <- rules, do: rule)

  defp numbers_in_rules(rules) do
    Enum.reduce(rules, %{}, fn {left, right}, acc ->
      acc
      |> Map.put_new_lazy(left, fn -> {comes_before(left, rules), comes_after(left, rules)} end)
      |> Map.put_new_lazy(right, fn -> {comes_before(right, rules), comes_after(right, rules)} end)
    end)
  end

  # May want to raise if we see dupes here.
  defp numbers_before(number, numbers) do
    {_, before, afterr} =
      Enum.reduce(numbers, {false, [], []}, fn n, {spotted?, before, afterr} ->
        if spotted? do
          {spotted?, before, [n | afterr]}
        else
          if n == number do
            {true, before, afterr}
          else
            {false, [n | before], afterr}
          end
        end
      end)

    {before, afterr}
  end

  # Module attributes used in the binary patterns below.
  @new_line "\n"
  @comma ","
  @pipe "|"
  def rules_reports(<<@new_line, @new_line, rest::binary>>, rules) do
    {Enum.reverse(rules), parse_reports(rest, [], [])}
  end

  def rules_reports(<<@new_line, rest::binary>>, acc), do: rules_reports(rest, acc)

  def rules_reports(<<left::binary-size(2), @pipe, right::binary-size(2), rest::binary>>, rules) do
    rules_reports(rest, [{String.to_integer(left), String.to_integer(right)} | rules])
  end

  def parse_reports(<<>>, _report, reports), do: Enum.reverse(reports)

  def parse_reports(<<@new_line, rest::binary>>, report, reports) do
    parse_reports(rest, [], [Enum.reverse(report) | reports])
  end

  def parse_reports(<<@comma, rest::binary>>, report, reports) do
    parse_reports(rest, report, reports)
  end

  def parse_reports(<<numb::binary-size(2), rest::binary>>, report, reports) do
    parse_reports(rest, [String.to_integer(numb) | report], reports)
  end

Part 2

  def day_5_2() do
    input = File.read!("./day_5_input.txt")
    {rules, reports} = rules_reports(input, [])

    rules = numbers_in_rules(rules)

    Enum.reduce(reports, 0, fn report, mid_number_sum ->
      report_good? =
        Enum.all?(report, fn number ->
          {must_come_before, must_come_after} = Map.fetch!(rules, number)
          {comes_after, comes_before} = numbers_before(number, report)

          (comes_after == [] || not Enum.any?(comes_after, &(&1 in must_come_before))) &&
            (comes_before == [] || not Enum.all?(comes_before, &(&1 in must_come_after)))
        end)

      if report_good? do
        mid_number_sum
      else
        # Kinda nasty to do this after, as we could do it in the moment, but yea.
        mid = Enum.at(fix_report(report, rules), div(length(report), 2))
        mid_number_sum + mid
      end
    end)
  end

  def is_less_than?(number, another, rules) do
    {_must_come_before, must_come_after} = Map.fetch!(rules, number)
    another in must_come_after
  end

  # always an odd number of items in report as there is always a mid point.
  defp fix_report(report, rules) do
    Enum.sort_by(report, & &1, fn left, right -> !is_less_than?(left, right, rules) end)
  end
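
Since the mapper is the identity function, Enum.sort/2 with just the comparator would be an equivalent formulation:

  defp fix_report(report, rules) do
    Enum.sort(report, fn left, right -> !is_less_than?(left, right, rules) end)
  end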

Obligatory meaningless benchmarks:


Name              ips        average  deviation         median         99th %
Day 5 1        1.01 K        0.99 ms    ±11.12%        0.96 ms        1.18 ms
Day 5 2        0.75 K        1.33 ms    ±10.55%        1.31 ms        1.59 ms

Comparison:
Day 5 1        1.01 K
Day 5 2        0.75 K - 1.34x slower +0.34 ms

Memory usage statistics:

Name       Memory usage
Day 5 1         1.62 MB
Day 5 2         1.82 MB - 1.12x memory usage +0.199 MB

**All measurements for memory usage were the same**

Reduction count statistics:

Name    Reduction count
Day 5 1        545.29 K
Day 5 2        615.17 K - 1.13x reduction count +69.88 K

**All measurements for reduction count were the same**

Working on getting caught up so I didn’t even bother refactoring the duplicated logic between part 1 & 2. Calling this copy/paste/modify junk good enough. Looking forward to reading through other solutions. AOC is turning out to be a fun, and sometimes frustrating, way to learn the Elixir standard library.

defmodule Aoc2024.Day5 do
  @moduledoc false

  defp get_input(file) do
    File.read!(file)
    |> String.split("\n")
    |> Enum.split_while(fn line -> line != "" end)
    |> Tuple.to_list()
    |> Enum.map(&Enum.filter(&1, fn line -> line != "" end))
  end

  defp parse_reqs(lines) do
    lines
    |> Enum.map(&String.split(&1, "|"))
    |> Enum.map(fn [a, b] -> [{b, [a]}] end)
    |> Enum.map(&Map.new/1)
    |> Enum.reduce(Map.new(), fn req, reqs ->
      Map.merge(reqs, req, fn _key, value1, value2 -> value1 ++ value2 end)
    end)
  end

  defp correctly_ordered(update, reqs) do
    ordered? =
      update
      |> Enum.with_index()
      |> Enum.all?(fn {page, index} ->
        Map.get(reqs, page, [])
        |> Enum.all?(fn dep ->
          i = Enum.find_index(update, fn p -> p == dep end)
          # deps absent from the update (i == nil) don't constrain it
          i == nil or i < index
        end)
      end)

    if ordered?, do: update, else: false
  end

  defp incorrectly_ordered(update, reqs) do
    ordered? =
      update
      |> Enum.with_index()
      |> Enum.all?(fn {page, index} ->
        Map.get(reqs, page, [])
        |> Enum.all?(fn dep ->
          i = Enum.find_index(update, fn p -> p == dep end)
          i == nil or i < index
        end)
      end)

    if ordered?, do: false, else: update
  end

  defp get_middle_page(update) do
    Enum.at(update, Integer.floor_div(length(update), 2))
  end

  def part1(file) do
    [reqs, updates] = get_input(file)
    reqs = parse_reqs(reqs)

    updates
    |> Enum.map(&String.split(&1, ","))
    |> Enum.filter(&correctly_ordered(&1, reqs))
    |> Enum.map(&get_middle_page/1)
    |> Enum.map(&String.to_integer/1)
    |> Enum.sum()
  end

  defp add(new_update, page, bad_update, reqs) do
    update =
      Map.get(reqs, page, [])
      |> Enum.reduce(new_update, fn dep, acc ->
        if not Kernel.in(dep, bad_update) or Kernel.in(dep, acc) do
          acc
        else
          add(acc, dep, bad_update, reqs)
        end
      end)

    [page | update]
  end

  defp order_correctly(update, reqs) do
    update
    |> Enum.reduce([], fn page, acc ->
      if Kernel.in(page, acc) do
        acc
      else
        add(acc, page, update, reqs)
      end
    end)
  end

  def part2(file) do
    [reqs, updates] = get_input(file)
    reqs = parse_reqs(reqs)

    updates
    |> Enum.map(&String.split(&1, ","))
    |> Enum.filter(&incorrectly_ordered(&1, reqs))
    |> Enum.map(&order_correctly(&1, reqs))
    |> Enum.map(&get_middle_page/1)
    |> Enum.map(&String.to_integer/1)
    |> Enum.sum()
  end
end
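
Hypothetical usage, assuming an input file in the usual two-section format (file name made up):

Aoc2024.Day5.part1("day5_input.txt") |> IO.inspect(label: "part 1")
Aoc2024.Day5.part2("day5_input.txt") |> IO.inspect(label: "part 2")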

Like every year I’ve been following along with AOC at my own pace. Here’s my Livebook solution for Day 5: I tend to model the problems as structured data and then manipulate that data when possible. livebooks/advent-of-code/2024/day05.livemd at main · sdball/livebooks · GitHub

Not really been making an effort to keep up this year. Stuck on this one because my intuition is apparently wrong. My thought was to use the rules to make a directed acyclic graph, with an edge from every left-hand value to its right-hand value. Then checking the updates just means there has to be a path between each consecutive pair of page numbers. Works great for the example data, but apparently none of the updates from the real input data meet this condition. Any insight into why my logic is wrong?

def do_order(graph, []), do: graph

def do_order(graph, [[a, b] | rest]) do
  :digraph.add_vertex(graph, a)
  :digraph.add_vertex(graph, b)
  :digraph.add_edge(graph, a, b)
  do_order(graph, rest)
end

def solve(rules, updates) do
  updates
  |> Enum.map(&Enum.chunk_every(&1, 2, 1, :discard))
  |> Enum.filter(&Enum.all?(&1, fn [a, b] -> :digraph.get_path(rules, a, b) end))
  |> Enum.map(fn update -> Enum.at(update, div(length(update), 2)) |> hd() end)
  |> Enum.sum()
end

EDIT:
I’m apparently doing something wrong with the process of adding edges to the graph. There are 1176 rules in my data set but I end up with a graph that contains only 632 edges. I can’t figure out why …


This one took longer than it should have, because it turned up a possible bug in :digraph_utils.

Part 1:

defmodule PrintQueue do
  def read(filename) do
    filename
    |> File.read!()
    |> String.split("\n\n")
    |> then(fn [rules, pages] ->
      {
        parse_rules(rules),
        parse_pages(pages)
      }
    end)
  end

  defp parse_rules(rules) do
    g = :digraph.new()

    rules
    |> String.split("\n")
    |> Enum.each(fn line ->
      line
      |> String.split("|")
      |> Enum.map(&String.to_integer/1)
      |> then(fn [v1, v2] ->
        :digraph.add_vertex(g, v1)
        :digraph.add_vertex(g, v2)
        :digraph.add_edge(g, v1, v2)
      end)
    end)

    g
  end

  defp parse_pages(pages) do
    pages
    |> String.trim()
    |> String.split("\n")
    |> Enum.map(fn line ->
      line
      |> String.split(",")
      |> Enum.map(&String.to_integer/1)
    end)
  end

  # Returns the topological sort (a list) when the page order is consistent
  # with the rules, or false when the added ordering edges create a cycle.
  def valid?(page, rules) do
    subgraph = fixed_subgraph(rules, page)

    page
    |> triangle()
    |> Enum.to_list()
    |> Enum.each(fn {v1, v2} ->
      :digraph.add_edge(subgraph, v1, v2)
    end)

    result = :digraph_utils.topsort(subgraph)

    :digraph.delete(subgraph)

    result
  end

  # All ordered pairs {earlier, later} implied by the page list.
  defp triangle([_]), do: []

  defp triangle([h | rest]) do
    rest
    |> Enum.map(&{h, &1})
    |> Stream.concat(triangle(rest))
  end

  def edges(g) do
    :digraph.edges(g)
    |> Enum.map(&:digraph.edge(g, &1))
    |> Enum.map(fn {_, a, b, _} -> {a, b} end)
    |> Enum.sort()
  end

  # Workaround for the suspected :digraph_utils bug: subgraph/2 doesn't carry
  # over the edge-id counter, so copy :"$eid" from the original graph's
  # internal ETS table to keep newly added edge ids from colliding.
  defp fixed_subgraph(g, vs) do
    sg = :digraph_utils.subgraph(g, vs)

    [{_, last_eid}] = :ets.lookup(elem(g, 3), :"$eid")
    ntab = elem(sg, 3)
    :ets.delete(ntab, :"$eid")
    :ets.insert(ntab, {:"$eid", last_eid})

    sg
  end

  def middle(vs) do
    len = length(vs)

    Enum.at(vs, div(len, 2))
  end
end

{rules, pages} = PrintQueue.read("input.txt")

pages
|> Enum.map(&PrintQueue.valid?(&1, rules))
|> Enum.filter(& &1)
|> Enum.map(&PrintQueue.middle/1)
|> Enum.sum()
|> IO.inspect()

The idea:

  • turn the rules into a :digraph of “x precedes y”
  • for a list of pages, get a subgraph that only has those pages as vertices
  • compute all the “x precedes y” relations from the given page list and add them to the subgraph
  • check the subgraph for a cycle; a rule violation (e.g. putting 75 before 97 when the rule says 97|75) will create one

Hilariously, part 2 requires LESS effort than part 1 - the “correctly ordered” updates are just a :digraph_utils.topsort of the subgraph!

Is it correct that the shortest path must be just the two vertices? I think this is only true when all possible pairs are defined. If you had a data set with just the rules "a|b" and "b|c", the update "a,c" should be valid even though the shortest path on the graph would be [a, b, c]. Am I incorrect?


It turned out that though the whole graph is cyclic, the minimal subgraph containing all the vertices in each “update” is acyclic.


Thanks for the tip. I had it in my head that the whole graph would have to be acyclic, and this caused some edges to get silently rejected (with an :acyclic digraph, :digraph.add_edge/3 returns an {:error, {:bad_edge, _}} tuple instead of raising).

Turns out :digraph_utils.subgraph and :digraph_utils.topsort made part 2 trivially easy.
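
For anyone landing here later, presumably something along these lines (a sketch, assuming rules is the :digraph of x|y edges and update is one page list; per the note above, the per-update subgraph is acyclic even when the full rule graph is not):

  defp reorder(update, rules) do
    sg = :digraph_utils.subgraph(rules, update)
    # topsort/1 returns the sorted vertex list, or false if the subgraph is cyclic
    order = :digraph_utils.topsort(sg)
    :digraph.delete(sg)
    order
  end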


Indeed, as the input is exhaustive, this is all that’s needed (no need to build a graph).
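
i.e. a plain comparator over the rule pairs suffices. A minimal sketch, assuming the rules are parsed into a MapSet of {before, after} tuples:

rule_set = MapSet.new(rules)
fixed = Enum.sort(update, fn a, b -> {a, b} in rule_set end)

This only works because the input defines a rule for every pair of pages that can co-occur in an update; with a sparser rule set the comparator wouldn't be a total order.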

Any benefit to sorted? and greater? being anonymous functions rather than defined, named helper functions?

I would need to pass the map out of the input parsing function, and write inline anonymous functions for Enum.reject and Enum.sort that are closures over the map and call the named functions anyway. So: 4 functions and an exposed map, rather than 2 functions.

1 Like

LOL, came here well after the fact and discovered that I had been ambitious with my solution to Part 2: I wrote my own stable topological sort, using :gb_sets to provide both the container for the edges and a heap-like container for ensuring the stability of the sort.

defmodule Graph do

  # Each edge is stored twice, as {:fwd, pred, succ} and {:rev, succ, pred},
  # so a node's out-edges and in-edges each form a contiguous range in the set.
  def add_edge(edges, pred, succ) do
    edges = :gb_sets.add({:fwd, pred, succ}, edges)
    :gb_sets.add({:rev, succ, pred}, edges)
  end

  def delete_edge(edges, pred, succ) do
    edges = :gb_sets.delete_any({:fwd, pred, succ}, edges)
    :gb_sets.delete_any({:rev, succ, pred}, edges)
  end

  defp collect_while(iter, cont?, acc \\ []) do
    case :gb_sets.next(iter) do
      :none ->
        Enum.reverse(acc)
      {elem, iter_next} ->
        if cont?.(elem) do
          collect_while(iter_next, cont?, [elem | acc])
        else
          Enum.reverse(acc)
        end
    end
  end

  # Does `node` have any edge in the given direction? iterator_from/2 seeks
  # straight to where that node's entries would begin.
  def has_edges?(edges, dir, node) do
    iter = :gb_sets.iterator_from({dir, node, -1}, edges)

    case :gb_sets.next(iter) do
      :none ->
        false

      {{d, x, _y}, _iter_next} ->
        d == dir and x == node
    end
  end

  def get_edges(edges, dir, node) do
    iter = :gb_sets.iterator_from({dir, node, -1}, edges)

    iter
    |> collect_while(fn {d, x, _y} -> d == dir and x == node end)
    |> Enum.map(fn {_d, _x, y} -> y end)
  end
  
  # Build the per-update edge set: only rules whose pages both appear in the
  # update contribute edges.
  def graph_edges(update, rules) do
    nodes = MapSet.new(update)

    Enum.reduce(rules, :gb_sets.empty(), fn {bef, afts}, edges ->
      if MapSet.member?(nodes, bef) do
        afts
        |> Enum.filter(fn aft -> MapSet.member?(nodes, aft) end)
        |> Enum.reduce(edges, fn aft, edges -> add_edge(edges, bef, aft) end)
      else
        edges
      end
    end)
  end

  # Split the update into the initial "terminal" set (pages with no outgoing
  # edges), keyed by original index so ties resolve stably, plus an index map
  # for everything else.
  def terms_order(update, edges) do
    update
    |> Enum.with_index()
    |> Enum.reduce({:gb_sets.empty(), Map.new()}, fn {node, i}, {terms, order} ->
      if has_edges?(edges, :fwd, node) do
        {terms, Map.put(order, node, i)}
      else
        {:gb_sets.add({i, node}, terms), order}
      end
    end)
  end

  # Kahn-style topological sort built back to front: repeatedly emit the
  # terminal page with the largest original index, then free its predecessors.
  def construct_order(terms, order, edges, acc \\ []) do
    if :gb_sets.is_empty(terms) do
      acc
    else
      {{_i, node}, terms} = :gb_sets.take_largest(terms)
      acc = [node | acc]
      preds = get_edges(edges, :rev, node)

      edges =
        Enum.reduce(preds, edges, fn pred, edges ->
          delete_edge(edges, pred, node)
        end)

      terms =
        preds
        |> Enum.filter(fn pred -> not has_edges?(edges, :fwd, pred) end)
        |> Enum.reduce(terms, fn pred, terms ->
          :gb_sets.add({order[pred], pred}, terms)
        end)

      construct_order(terms, order, edges, acc)
    end
  end

  def reorder(update, rules) do
    edges = graph_edges(update, rules)
    {terms, order} = terms_order(update, edges)
    construct_order(terms, order, edges)
  end
end
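
Hypothetical usage, assuming the rules are parsed into a map of page => pages-that-must-come-after (the {bef, afts} shape graph_edges/2 consumes):

rules = %{47 => [53, 13], 97 => [13, 47, 75]}
Graph.reorder([75, 97, 47, 13], rules)
#=> [97, 75, 47, 13]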