@marvintherain
Just wanted to note that you can use `:ets` directly, without any extra libs.

Here's a basic rate-limiter:
```elixir
defmodule MyApp.RateLimit do
  @moduledoc """
  Thin wrapper around `:ets.update_counter/4` and a clean-up process to act as a fixed window rate limiter.

  Based on https://github.com/michalmuskala/plug_attack
  """

  use GenServer

  @table __MODULE__

  @doc """
  Starts the process that creates and cleans the ETS table.

  Accepts the following options:

  - `:clean_period` for how often to perform garbage collection, defaults to 10 minutes
  """
  def start_link(opts) do
    GenServer.start_link(__MODULE__, opts)
  end

  @doc "Increments count and checks if it's still within limit"
  def hit(key, scale, limit, increment \\ 1) do
    now = now()
    window = div(now, scale)
    full_key = {key, window}
    expires_at = (window + 1) * scale

    # on the first hit in a window the default record {full_key, 0, expires_at}
    # is inserted, then the count (second element) is incremented atomically
    count = :ets.update_counter(@table, full_key, increment, {full_key, 0, expires_at})

    if count <= limit, do: {:allow, count}, else: {:deny, _retry_after = expires_at - now}
  end

  @impl true
  def init(opts) do
    clean_period = Keyword.get(opts, :clean_period, :timer.minutes(10))

    :ets.new(@table, [
      :named_table,
      :set,
      :public,
      {:read_concurrency, true},
      {:write_concurrency, true},
      {:decentralized_counters, true}
    ])

    schedule(clean_period)
    {:ok, %{clean_period: clean_period}}
  end

  @impl true
  def handle_info(:clean, state) do
    # deletes every {_key, _count, expires_at} record whose window has already expired
    :ets.select_delete(@table, [{{{:_, :_}, :_, :"$1"}, [], [{:<, :"$1", {:const, now()}}]}])
    schedule(state.clean_period)
    {:noreply, state}
  end

  defp schedule(clean_period) do
    Process.send_after(self(), :clean, clean_period)
  end

  @compile inline: [now: 0]
  defp now do
    System.system_time(:millisecond)
  end
end
```
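
For reference, this is roughly what the return values look like from IEx, assuming `MyApp.RateLimit` has already been started (the exact `retry_after` milliseconds below are made up):

```elixir
iex> MyApp.RateLimit.hit("some key", :timer.seconds(10), 2)
{:allow, 1}
iex> MyApp.RateLimit.hit("some key", :timer.seconds(10), 2)
{:allow, 2}
# the third hit within the same 10-second window is over the limit of 2;
# the second element is how many milliseconds are left until the window resets
iex> MyApp.RateLimit.hit("some key", :timer.seconds(10), 2)
{:deny, 6345}
```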
And here's how it could be used.

Start the rate-limiter, e.g. by the root supervisor:
```elixir
defmodule MyApp.Application do
  @moduledoc false
  use Application

  @impl true
  def start(_type, _args) do
    children = [
      # ...
      MyApp.RateLimit,
      # ...
    ]

    opts = [strategy: :one_for_one, name: MyApp.Supervisor]
    Supervisor.start_link(children, opts)
  end
end
```
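
The garbage-collection interval can be tuned through the `:clean_period` option from the moduledoc, e.g. (the one-minute value is just an example):

```elixir
children = [
  # ...
  {MyApp.RateLimit, clean_period: :timer.minutes(1)}
]
```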
And call it in your endpoint or in a plug pipeline for the LiveView.

Endpoint version:
```elixir
defmodule MyApp.Endpoint do
  use Phoenix.Endpoint, otp_app: :my_app

  # ...

  # https://github.com/ajvondrak/remote_ip
  plug RemoteIp
  plug :rate_limit

  # ...

  plug MyApp.Router

  defp rate_limit(conn, _opts) do
    case MyApp.RateLimit.hit({:global, conn.remote_ip}, _scale = :timer.seconds(10), _limit = 10) do
      {:allow, _count} ->
        conn

      # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
      {:deny, retry_after_ms} ->
        retry_after_seconds = div(retry_after_ms, 1000)

        # Plug header values must be strings
        conn
        |> put_resp_header("retry-after", Integer.to_string(retry_after_seconds))
        |> send_resp(429, "You are too fast, and you are rate limited. Try again in #{retry_after_seconds} seconds.")
        |> halt()
    end
  end
end
```
Router pipeline version:
```elixir
defmodule MyAppWeb.Router do
  use MyAppWeb, :router

  # ...

  pipeline :lv_rate_limit do
    # https://github.com/ajvondrak/remote_ip
    plug RemoteIp
    plug :rate_limit, "my_live"
  end

  scope "/", MyAppWeb do
    pipe_through [:browser, :lv_rate_limit]

    live "/", MyLive.Index, :index
  end

  defp rate_limit(conn, namespace) do
    case MyApp.RateLimit.hit({namespace, conn.remote_ip}, _scale = :timer.seconds(10), _limit = 10) do
      {:allow, _count} ->
        conn

      # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
      {:deny, retry_after_ms} ->
        retry_after_seconds = div(retry_after_ms, 1000)

        # Plug header values must be strings
        conn
        |> put_resp_header("retry-after", Integer.to_string(retry_after_seconds))
        |> send_resp(429, "You are too fast, and you are rate limited. Try again in #{retry_after_seconds} seconds.")
        |> halt()
    end
  end
end
```
Note that this assumes your rate-limited LiveViews cannot be navigated to without passing through the plugs and performing an HTTP request first. If they can be (e.g. via live navigation), then you would also need to put the rate limiter into an `on_mount` hook or the `mount` callback, as sketched below. Please see Security considerations — Phoenix LiveView v1.0.1.
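
For reference, a minimal sketch of such a hook, assuming the LiveView socket in your endpoint is configured with `connect_info: [:peer_data]` so the client IP is available during mount (the module name, hook name, key, and limits below are made up):

```elixir
defmodule MyAppWeb.RateLimitHook do
  # a sketch, not a drop-in: adjust the key, scale, limit, and redirect target to your app
  import Phoenix.LiveView, only: [get_connect_info: 2, put_flash: 3, redirect: 2]

  def on_mount(:rate_limit, _params, _session, socket) do
    # requires e.g. `websocket: [connect_info: [:peer_data, ...]]` on the LiveView socket
    peer_data = get_connect_info(socket, :peer_data)
    ip = peer_data && peer_data.address

    case MyApp.RateLimit.hit({"my_live", ip}, _scale = :timer.seconds(10), _limit = 10) do
      {:allow, _count} ->
        {:cont, socket}

      {:deny, _retry_after_ms} ->
        {:halt,
         socket
         |> put_flash(:error, "You are too fast, and you are rate limited.")
         |> redirect(to: "/")}
    end
  end
end
```

It can then be attached per LiveView with `on_mount {MyAppWeb.RateLimitHook, :rate_limit}`, or to a whole `live_session` in the router via `on_mount: {MyAppWeb.RateLimitHook, :rate_limit}`.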