Advent of Code 2024 - Day 19

This one was scary at first.

I loved how part one directs you into an obvious optimization, and then part 2 kicks your butt by asking to not do that very specific optimization :smiley:

My solution for part 2 runs in between 150ms and 380ms. I guess it’s because of the async streams: it’s always fast (around 150ms, rarely even 120ms) when I run it once, but when benchmarked with benchee for 10 seconds it averages between 170ms and 380ms depending on the run. Anyway, it is fast enough.

defmodule AdventOfCode.Solutions.Y24.Day19 do
  @moduledoc """
  Day 19: towel patterns.

  Part 1 counts the targets that can be assembled from the towels at all;
  part 2 counts, for every possible target, how many distinct towel
  sequences produce it, and sums those counts.
  """

  alias AoC.Input

  # Returns {towels, targets} where each towel is {text, byte_size(text)}
  # so binary prefix matches can use an explicit size.
  def parse(input, _part) do
    [towel_block, target_block] =
      input |> Input.read!() |> String.trim() |> String.split("\n\n")

    towels =
      towel_block
      |> String.split([",", " "], trim: true)
      |> Enum.map(fn text -> {text, byte_size(text)} end)

    {towels, String.split(target_block, "\n", trim: true)}
  end

  # Count the targets that can be built from the (reduced) towel set.
  def part_one({towels, targets}) do
    primitives = reduce_towels(towels)

    targets
    |> filter_possible_targets(primitives)
    |> length()
  end

  # Repeatedly drop every towel that can itself be assembled from the other
  # towels, until only "primitive" towels remain. A smaller towel set makes
  # rejecting impossible targets much faster.
  defp reduce_towels(towels) do
    {composable, primitives} =
      Enum.split_with(towels, fn {text, _size} = towel ->
        possible_target?(text, towels -- [towel])
      end)

    case composable do
      [] -> primitives
      _some -> reduce_towels(primitives)
    end
  end

  # Checks each target concurrently and keeps only the buildable ones.
  defp filter_possible_targets(targets, towels) do
    targets
    |> Task.async_stream(fn target -> {possible_target?(target, towels), target} end,
      ordered: false,
      timeout: :infinity
    )
    |> Enum.flat_map(fn
      {:ok, {true, target}} -> [target]
      {:ok, {false, _target}} -> []
    end)
  end

  defp possible_target?(target, towels), do: possible_target?(target, towels, towels)

  # Depth-first search: strip any towel that prefixes the target and recurse
  # on the remainder with the full towel list.
  defp possible_target?("", _remaining, _all_towels), do: true
  defp possible_target?(_target, [], _all_towels), do: false

  defp possible_target?(target, [{text, size} | rest_towels], all_towels) do
    matched_here? =
      case target do
        <<^text::binary-size(size), rest::binary>> ->
          possible_target?(rest, all_towels, all_towels)

        _ ->
          false
      end

    matched_here? || possible_target?(target, rest_towels, all_towels)
  end

  # Sum, over every buildable target, the number of distinct towel sequences
  # that produce it. Filtering with the reduced towel set first keeps the
  # expensive counting restricted to targets known to be possible.
  def part_two({towels, targets}) do
    primitives = reduce_towels(towels)
    buildable = filter_possible_targets(targets, primitives)

    buildable
    |> Task.async_stream(&count_combinations(&1, towels), ordered: false, timeout: :infinity)
    |> Enum.reduce(0, fn {:ok, n}, acc -> n + acc end)
  end

  # Narrow the towel list to towels that occur in the target at all, then
  # run the breadth-first suffix count starting from the whole target.
  defp count_combinations(target, towels) do
    relevant = Enum.filter(towels, fn {text, _size} -> String.contains?(target, text) end)
    do_count(%{target => 1}, relevant, 0)
  end

  # Breadth-first count over a map of remaining_suffix => number_of_ways.
  # Each round strips every matching towel prefix from every suffix; equal
  # remainders are merged by summing their way-counts, so each distinct
  # suffix is expanded only once per round. Empty remainders are complete
  # arrangements and are folded into the running total.
  defp do_count(suffixes, _towels, total) when map_size(suffixes) == 0, do: total

  defp do_count(suffixes, towels, total) do
    expanded =
      for {suffix, ways} <- suffixes, {text, size} <- towels, reduce: [] do
        acc ->
          case suffix do
            <<^text::binary-size(size), rest::binary>> -> [{rest, ways} | acc]
            _ -> acc
          end
      end

    {next_suffixes, finished} =
      Enum.reduce(expanded, {%{}, 0}, fn
        {"", ways}, {map, done} -> {map, done + ways}
        {suffix, ways}, {map, done} -> {Map.update(map, suffix, ways, &(&1 + ways)), done}
      end)

    do_count(next_suffixes, towels, total + finished)
  end
end

I started with a straightforward solution I coded up in a few minutes, but it was taking too long on the real input, so I spent a while to rewrite it with a prefix tree (trie) instead. It ended up being too slow too, at which point I realized that, of course, I just needed to use memoization. So then I added it and was able to get the final answer. Ironically, it turned out that my original solution just lacked memoization as well so after adding it it ended up being even faster. Though both are slow compared to your runtime—definitely takes a few seconds for me. I didn’t parallelize, though.

With prefix tree and custom memoization: advent-of-code-2024/lib/advent_of_code2024/day19_trie.ex at main · ibarakaiev/advent-of-code-2024 · GitHub

Straightforward, using a nice memoization library: advent-of-code-2024/lib/advent_of_code2024/day19.ex at main · ibarakaiev/advent-of-code-2024 · GitHub

1 Like

I do not use memoization. I was tempted to but the puzzles links to 2023 day 12 which is a huge hint on the solution.

For the first part, I just brute force, but not with all towels: I start by removing all towels that can be created from other towels, until only primitive towels are left. This slows down the patterns that are possible a little bit, but they are fast anyway. It makes the time to find impossible patterns very short.

For part two, for each target I start by removing towels that are not found in the pattern, to reduce the search scope. Then for each towel I check if the pattern starts with the towel, and when it does, I keep the rest of the pattern and I count 1, then I repeat with that list of “remainders” until the remainder is "" (keep the count), or unfinished (discard the count).

Edit: Hmmm by writing this post I realize that I could optimize even more. Because for brwu I will find the wu remainder after two iterations, by removing b then r. But I will find it after a single iteration by removing br, so I’ll end up computing possible patterns on wu twice.

I could memoize that …

Yeah sorry that was a lot of spoiler tags…

EDIT
yeah by adding a little bookkeeping I can compute a suffix only once

  # Count the distinct towel sequences forming `target`, after discarding
  # towels that never occur in the target (they can never match a prefix).
  defp count_combinations(target, towels) do
    relevant_towels =
      Enum.filter(towels, fn {text, _size} -> String.contains?(target, text) end)

    do_count([{target, 1}], relevant_towels)
  end

  # Terminal state: the only remaining suffix is the empty string, whose
  # accumulated score is the total number of towel combinations.
  # NOTE(review): assumes the target was pre-filtered to be buildable; an
  # impossible target would drain the list to [] and raise
  # FunctionClauseError here — confirm callers always filter first.
  defp do_count([{"", count}], _) do
    count
  end

  # Pops the longest remaining suffix (the list is kept sorted longest-first
  # by insert/2), strips every towel that prefixes it, and re-inserts the
  # shortened remainders carrying the popped suffix's score. Expanding
  # strictly longest-first means each distinct suffix is processed once,
  # after all ways of reaching it have been merged into its score.
  defp do_count([longest | target_suffixes], towels) do
    {target_suffix, suffix_score} = longest

    new_suffixes =
      Enum.flat_map(towels, fn {h, b} ->
        # {h, b} is {towel_text, byte_size(towel_text)}; match it as a prefix.
        case target_suffix do
          <<^h::binary-size(b), rest::binary>> -> [{rest, suffix_score}]
          _ -> []
        end
      end)

    target_suffixes = insert_all(target_suffixes, new_suffixes)

    do_count(target_suffixes, towels)
  end

  # Fold every new {suffix, score} pair into the ordered suffix list.
  defp insert_all(target_suffixes, new_suffixes),
    do: Enum.reduce(new_suffixes, target_suffixes, &insert(&2, &1))

  # Ordered insert into a longest-first list of {suffix, score} pairs,
  # merging scores when the suffix is already present.

  # Still shorter than the current head: keep scanning.
  defp insert([{longer, _} = head | rest], {text, _} = new)
       when byte_size(text) < byte_size(longer) do
    [head | insert(rest, new)]
  end

  # Same suffix already queued: merge the scores instead of duplicating it.
  defp insert([{text, count} | rest], {text, add}) do
    [{text, count + add} | rest]
  end

  # Longer than (or equal-length to, but distinct from) the head: prepend.
  defp insert(rest, new) do
    [new | rest]
  end

Part 2 is around 80ms when my computer is not busy doing its own life as usual.

Part 1 was nice and easy and I was amazed that the DFS I coded worked on literally the first try. That never ever happens.

Part 2 was a spanner in the works though - I have (what I think is) a nice solution, I’m not sure the name of the technique/algorithm I used though. Maybe memoization that everyone keeps mentioning.

I use a priority queue to keep track of the leftover strings to process for each requested towel, and a cache to track how many times I’ve seen each string.

If a string comes up in the queue and I’ve seen it before, then I can discard it and increment the number of different ways we’ve gotten to this point in the cache (by the number of ways we got to the previous point). Then, when the empty string comes up in the queue, I’ve seen all the ways to get here and can pluck it out of my cache. Works pretty well!

Name                     ips        average  deviation         median         99th %
day 19, part 1         29.38       34.03 ms     ±0.86%       34.01 ms       35.47 ms
day 19, part 2         29.36       34.06 ms     ±1.18%       34.03 ms       36.73 ms
1 Like

Nice, I guess my last optimization could be refined again and I would end up with what you did directly haha

Surprisingly short code for today’s solution. I guess part 2 is not the fastest, but I don’t think it’s too bad for something I just made up on the spot.

defmodule Day19 do
  @moduledoc """
  Advent of Code 2024, day 19: given a towel inventory and a list of desired
  patterns, part 1 asks which patterns are possible at all and part 2 asks
  for the total number of distinct towel arrangements.
  """

  @doc "Splits the raw input into `{towels, patterns}`, both lists of strings."
  def parse(input) do
    [towels, patterns] = String.split(input, "\n\n")
    towels = String.split(towels, ", ")
    patterns = String.split(patterns, "\n", trim: true)

    {towels, patterns}
  end

  @doc "Returns `true` when `pattern` can be fully covered by the towels."
  def possible?("", _towels), do: true

  def possible?(pattern, towels) do
    Enum.any?(towels, fn towel ->
      # `^towel <> remaining` is not a valid pattern (the left operand of <>
      # in a match must be a binary literal on standard Elixir releases), so
      # match an explicitly sized binary prefix instead.
      size = byte_size(towel)

      case pattern do
        <<^towel::binary-size(size), remaining::binary>> -> possible?(remaining, towels)
        _ -> false
      end
    end)
  end

  @doc """
  Counts the distinct ways `pattern` can be assembled from `towels`.

  Works on a map of `remaining_suffix => number_of_ways`, always expanding
  the longest suffix first so each suffix is expanded only after every way
  of reaching it has been merged into its count. Returns 0 for impossible
  patterns (the map empties without ever producing the empty suffix).
  """
  def arrangements(pattern, towels) when is_binary(pattern) do
    arrangements(%{pattern => 1}, towels)
  end

  # The empty suffix was never reached: the pattern is impossible.
  def arrangements(patterns, _towels) when map_size(patterns) == 0, do: 0

  # Only the empty suffix remains; its count is the number of arrangements.
  def arrangements(%{"" => result} = patterns, _towels) when map_size(patterns) == 1, do: result

  def arrangements(patterns, towels) do
    # byte_size/1 is O(1) and orders suffixes of the same pattern exactly
    # like String.length/1 does.
    {longest, count} = Enum.max_by(patterns, fn {pattern, _count} -> byte_size(pattern) end)
    patterns = Map.delete(patterns, longest)

    patterns =
      towels
      |> Enum.flat_map(fn towel ->
        size = byte_size(towel)

        case longest do
          <<^towel::binary-size(size), remaining::binary>> -> [{remaining, count}]
          _ -> []
        end
      end)
      # Distinct towels strip prefixes of distinct lengths, so the remainders
      # are unique here; merging sums counts for suffixes reached before.
      |> Map.new()
      |> Map.merge(patterns, fn _key, count1, count2 -> count1 + count2 end)

    arrangements(patterns, towels)
  end
end

# Parse the puzzle input into the towel inventory and the requested patterns.
{towels, patterns} = Day19.parse(puzzle_input)

# Part 1: number of patterns that can be built at all.
patterns |> Enum.count(fn pattern -> Day19.possible?(pattern, towels) end)

# Part 2: total count of distinct towel arrangements over all patterns.
Enum.sum(Enum.map(patterns, fn pattern -> Day19.arrangements(pattern, towels) end))

My straightforward solution for part 1 didn’t terminate for my real input.

I then implemented a trie (prefix tree). That took me a while, but it still didn’t terminate.

I then added memoization using the process dictionary. That worked.

After solving part 2, I cleaned up my code. I tried to use Memoize for memoization but the time increased to 31 seconds. I did some attempts to make Memoize use only the first argument of my count function, but I couldn’t make it work. In the end, I rewrote my count function to take an explicit memo argument.

The combined runtime for both parts and the examples is 0.4 seconds.

2 Likes

Mine takes about 50ms per part, using in-process memoization since mixing designs does not seem to bring much.

defmodule AdventOfCode.Solution.Year2024.Day19 do
  @moduledoc """
  Day 19: towel arrangements. Both parts share `run/2`, which yields the
  number of ways each design can be built; part 1 counts the designs with at
  least one way, part 2 sums the ways.
  """

  use AdventOfCode.Solution.SharedParse

  @impl true
  # First input line is the comma-separated pattern list; the remaining
  # lines are the designs (the blank separator is dropped by trim: true).
  def parse(input),
    do: String.split(input, "\n", trim: true) |> then(&{String.split(hd(&1), ", "), tl(&1)})

  def part1({patterns, designs}), do: run(patterns, designs) |> Enum.count(&(&1 > 0))

  def part2({patterns, designs}), do: run(patterns, designs) |> Enum.sum()

  # One task per design; each task memoizes in its own process dictionary,
  # so nothing is shared across designs (order is irrelevant to callers,
  # who only count or sum the results).
  defp run(patterns, designs) do
    Task.async_stream(designs, &ways(&1, patterns), ordered: false)
    |> Stream.map(fn {:ok, n} -> n end)
  end

  # Empty design: exactly one way (use no patterns at all).
  defp ways("", _), do: 1

  # Number of ways to build `design`, memoized in the process dictionary.
  # On a cache hit, Process.get/1 returns the stored non-nil count and the
  # `with` passes it through unchanged; on a miss (nil) the body runs and
  # caches its result via tap/2 before returning it.
  defp ways(design, patterns) do
    with nil <- Process.get(design) do
      Enum.reduce(patterns, 0, fn pattern, total ->
        # NOTE(review): `^pattern::binary` as a non-final, unsized pinned
        # segment is not accepted by all Elixir compiler versions — confirm
        # the project's minimum Elixir version supports this prefix match.
        case design do
          <<^pattern::binary, rest::binary>> -> ways(rest, patterns) + total
          _ -> total
        end
      end)
      |> tap(&Process.put(design, &1))
    end
  end
end
1 Like

I compiled a big regex for part 1. It is slow and does not work for part 2 but it was trivial to implement. Now I have to find another kind of solution to be able to do part 2 and probably part 1 faster.

# Split off the first line (towel list) and skip the blank separator line;
# everything after it is a design to test.
{towels, designs} =
  puzzle_input
  |> String.split("\n")
  |> then(fn [towel_line, _blank | design_lines] ->
    {String.split(towel_line, ", "), design_lines}
  end)

# One big alternation: a design is possible iff it is a sequence of towels.
{:ok, regex} =
  towels
  |> Enum.join("|")
  |> then(fn alternation -> Regex.compile("^(#{alternation})+$") end)

designs
|> Enum.reduce(0, fn design, n ->
  if String.match?(design, regex), do: n + 1, else: n
end)
|> IO.inspect(label: "Part 1")

2 Likes

hah, that’s a clever idea!

Part 1 was pretty straight forward. Part 2 I figured may take memoization, but I tried brute force anyway, and it did not terminate. So then I tried adding a “already solved” map and carry it along during the recursion, but I got all twisted in my thinking on this. So I converted it to a global variable, and that was easier to reason about.

I really should figure out how to do this in a recursive functional way, rather than relying on global state.

I also used Regex for part1, even though I knew it wouldn’t work for part2 :slight_smile:

I spent a long time trying to use recursion with memoization but couldn’t get it to terminate on the input.

I thought about it again but in terms of a flow chart/tree:

tree for example towels
├── r
│   ├── :end
│   └── b
│       └── :end
├── w
│   └── r
│       └── :end
├── b
│   ├── :end
│   ├── w
│   │   └── u
│   │       └── :end
│   └── r
│       └── :end
└── g
    ├── :end
    └── b
        └── :end

So if looking at color r, it could end there and start down a new tree or keep going, looking for a b.
I reduce the towels to a tree of maps, the r branch for example: %{:end => :end, "b" => %{:end => :end}}.
I reduce the pattern, one color at a time, accumulating all possible permutations in a list of {times, branch} tuples. All branches with :end states are summed to create a new permutation from the current color. All permutations with a next step matching the current color are advanced. The list should only contain unique states. On completion I sum the :end states.

Part 2 runs in 32ms on my old machine.

I solved it using a combination of Trie and memoization. I optimized it further with a custom implementation of Trie that returns back all intermediate prefixes and the remaining strings. For example, If we have a trie with values [“r”, “g”, “b”, “rb”, “rg”, “rbrgr”] and we test it on a value “rbrgr”, it would give the following in one pass:

[
  ok: %Advent.Structures.Trie.Node{depth: 5, final?: true, children: %{}},
  unfinished: "rgr",
  unfinished: "brgr"
]

I recursively explored all the options after getting these.
It ran in ~19ms for part2 on my machine.
Code for the same can be found here: AdventOfCodeElixir/lib/advent/year2024/day19.ex at main · divxvid/AdventOfCodeElixir · GitHub

1 Like

I have never written a macro before, but part one inspired me to give it a go. In hindsight, it was a bad idea, but it worked for part one.

defmodule LinenLayout do
  @moduledoc """
  Macro-based solver for part 1: every towel becomes its own `q/1` (and
  `r/1`) clause that strips that towel as a prefix via binary pattern
  matching, so the inventory is compiled into the match clauses themselves.
  `use LinenLayout` injects the driver functions; the `g/1` macro expands
  the towel list into the generated clauses.
  """

  defmacro __using__(_opts) do
    quote do
      import LinenLayout

      # safeq (resp. safer) is needed because I wanted a fallback clause like
      #   def q(_), do: 0
      # but there is no way to guarantee that clause comes _after_ all the
      # defs expanded from the macros, so try/rescue catches invocations
      # with no matching clause (i.e. patterns starting with a color no
      # towel covers).
      def safeq(x) do
        try do
          q(x)
        rescue _ -> nil
        end
      end

      def safer(x) do
        try do
          r(x)
        rescue _ -> nil
        end
      end

      # These two clauses stop the recursive steps below:
      # nil means a dead end (no towel matched) — contributes 0.
      def qq(nil) do
        0
      end
      # 1 means q("")/r("") was reached — the pattern was fully consumed.
      # Using `raise` for control flow; I don't like it, but I was desperate.
      # It will be rescued in `count_makeable`.
      def qq(1) do
        raise "yes"
      end

      # Try both the longest-first (`safeq`) and shortest-first (`safer`)
      # clause orders on the remaining pattern.
      def qq(x) do
          (safeq(x) |> qq()) + (safer(x) |> qq())
      end

      # Invoked when there is no pattern left — we had all the towels needed.
      def q(""), do: 1
      def r(""), do: 1

      # Counts how many of the newline-separated patterns are makeable.
      def count_makeable(towels) do
        towels
        |> String.split("\n", trim: true)
        |> Stream.map(&
          try do
            qq(&1)
          # qq raises when `safeq` (resp. `safer`) finished completing a pattern
          rescue _ in RuntimeError -> 1
          end)
        |> Enum.sum()
      end
    end
  end

  # I didn't need `f` _and_ `h`, but I wanted the macro to expand out to
  # functions with different names. (There is a way to pass the name as a
  # macro argument, but I was lazy.) They are exactly the same; to see why
  # both exist, read `g/1`.
  #
  # The basic idea: let Elixir's binary pattern matching do all of the work.
  # Each towel in the inventory gets its own `def q(color <> rest)`. We
  # recursively call `q(rest)` until either:
  # (a) we exhaust the pattern (yay, we had all the towels we needed!), or
  # (b) no clause matches (the pattern began with a color we have no towel
  #     for) and the call raises, which `safeq`/`safer` rescue.
  defmacro f(i) do
    quote do
      def q(unquote(i) <> rest), do: rest
    end
  end

  defmacro h(i) do
    quote do
      def r(unquote(i) <> rest), do: rest
    end
  end

  # Takes the comma-separated towel list and expands to two sets of clauses:
  # one set ordered longest towel first (the `q` clauses, via `f`), e.g. if
  # `bgr` and `b` are both towels, `q("bgr" <> rest)` appears before
  # `q("b" <> rest)`; the other set is reversed (the `r` clauses, via `h`).
  #
  # Why both? Each direction alone is greedy (always consuming the most — or
  # the fewest — stripes first) and misses some patterns; doing both was my
  # attempt to fix the under-counting. It doesn't _really_ fix the issue:
  # a towel `"bg"` "between" `"bgr"` and `"b"` would never be tried when the
  # pattern begins with `"bgr"`. That is probably exactly why this approach
  # will not work for part 2.
  defmacro g(u) do
   # `u` is the list of towels we have
   u = u
    |> String.split(", ")


   # expand out `f` for all the towels, longest first
    (u
      |> Enum.sort_by(fn l -> -String.length(l) end)
      |> Enum.map(fn o ->
        quote do
          f(unquote(o))
        end
        end)
    )
    ++
    (
     # then expand out `h`, shortest first
      u
      |> Enum.sort_by(&(String.length(&1)))
      |> Enum.map(&(quote do h(unquote(&1)) end))
    )
  end
end

Now actually solving part 1:

# Build a module whose q/1 and r/1 clauses are generated (via the `g` macro)
# from the example towel inventory.
defmodule Foo do
  use LinenLayout

  g "r, wr, b, g, bwu, rb, gb, br"
end

# The example patterns, one per line; count_makeable/1 splits them and
# counts how many can be assembled from the towels above (expected: 6).
"""
brwrr
bggr
gbbr
rrbgbr
ubwu
bwurrg
brgr
bbrgwb
"""
|> Foo.count_makeable()
1 Like