Multipart file content is empty when sent to another container service

I have a Phoenix API server and a Python machine learning server. The Phoenix side (api server) handles things like users, projects, API keys, and recording API requests, and, most importantly, it talks to the internal machine learning server (ml server).

One of the requests works like this: the client sends an image file to the api server, which then forwards the file to the ml server for the actual processing. The flow looks like: client <-> api server <-> ml server.

The expected result is that the ml server receives the file and processes it, but it does not: what the api server sent to the ml server was an empty (0-byte) image file. The whole flow works as expected in development, but not after the two are containerized and composed as services.

Below is the part of the code related to the process:

# api_controller.ex
defmodule AwesomeAppWeb.APIController do
  use AwesomeAppWeb, :controller
  
  alias AwesomeApp.MachineLearning

  # ...
  def request_0(conn, %{"image" => %Plug.Upload{} = image}) do
    case MachineLearning.process_0(image.path) do
      {:ok, %{status: 200, body: process_result}} ->
        # send success response ...
      _ ->
        # send not success response ...
    end
  end
  # ...
end
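For debugging, a minimal sanity check that the temp file still has content right before it is handed to the ML client could look like this (a sketch; the helper name is mine):

# debug sketch: stat the Plug.Upload temp file at forwarding time
require Logger

def log_upload_size(%Plug.Upload{path: path, filename: filename}) do
  case File.stat(path) do
    {:ok, %File.Stat{size: size}} ->
      Logger.debug("upload #{filename} at #{path}: #{size} bytes")

    {:error, reason} ->
      Logger.error("cannot stat #{path}: #{inspect(reason)}")
  end
end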

# machine_learning.ex
defmodule AwesomeApp.MachineLearning do
  use Tesla

  adapter Tesla.Adapter.Hackney, recv_timeout: 10_000

  # Tesla.Middleware.JSON already decodes responses,
  # so an extra DecodeJson middleware is redundant
  plug Tesla.Middleware.JSON

  def process_0(image_file_path) do
    ml_host = Application.get_env(:awesome_app, :ml_host)

    mp =
      Tesla.Multipart.new()
      |> Tesla.Multipart.add_file(image_file_path, name: :image)

    post("#{ml_host}/process_0", mp)
  end
end
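A variant that reads the file eagerly, instead of handing the adapter a lazy %File.Stream{}, would look like this (a sketch using Tesla.Multipart.add_file_content; process_0_eager is a hypothetical name):

# sketch: read the whole file up front so nothing depends on the
# temp file still being readable while the request body is streamed
def process_0_eager(image_file_path) do
  ml_host = Application.get_env(:awesome_app, :ml_host)
  content = File.read!(image_file_path)

  mp =
    Tesla.Multipart.new()
    |> Tesla.Multipart.add_file_content(content, "image",
      filename: Path.basename(image_file_path)
    )

  post("#{ml_host}/process_0", mp)
end

If this variant succeeded where process_0 does not, that would point at the file stream rather than the network.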

Below are the logs of one unexpected result:

# log before post
%Tesla.Multipart{
  boundary: "g2ully8QDwzqIr7EMn1kJmsPN1LslS",
  content_type_params: [],
  parts: [
    %Tesla.Multipart.Part{
      body: %File.Stream{
        line_or_bytes: 2048,
        modes: [:raw, :read_ahead, :read, :binary],
        path: "/opt/app/plug_tmp/plug-1576/multipart-1576227636-921572167153109-1",
        raw: true
      },
      dispositions: [
        name: :face_video,
        filename: "multipart-1576227636-921572167153109-1"
      ],
      headers: []
    }
  ]
}
# log after post
{:ok,
 %Tesla.Env{
   __client__: %Tesla.Client{adapter: nil, fun: nil, post: [], pre: []},
   __module__: AwesomeApp.MachineLearning,
   body: 0,
   headers: [
     {"server", "gunicorn/20.0.4"},
     {"date", "Fri, 13 Dec 2019 09:00:37 GMT"},
     {"connection", "close"},
     {"content-type", "application/json"},
     {"content-length", "1"}
   ],
   method: :post,
   opts: [],
   query: [],
   status: 422,
   url: "http://ml_server:8000/process_0"
 }}

The docker-compose file:

version: '2.4'

networks:
  api_ml:
  ml_cache:
  api_database:

volumes:
  tmp_data:

services:
  # gunicorn
  ml_server:
    image: ml_server:latest
    command: main:api --timeout=1200 --bind=0.0.0.0:8000 --log-file=-
    ports:
      - 8000:8000
    networks:
      - api_ml
      - ml_cache
    environment:
      - REDIS_HOST=ml_cache
    volumes:
      # location where images are stored temporarily after being received
      - tmp_data:/opt/app/tmp_data

  api_server:
    image: api_server:latest
    networks:
      - api_ml
      - api_database
    ports:
      - 4000:4000
    env_file:
      - .env.server.api
    environment:
      - ML_HOST=http://ml_server:8000
      - PLUG_TMPDIR=/opt/app/plug_tmp

  # and other services...
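For reference, ML_HOST is mapped to the :ml_host application env at runtime roughly like this (a sketch of my release config; the file name assumes Elixir 1.9-style releases):

# config/releases.exs (sketch)
import Config

config :awesome_app, :ml_host, System.fetch_env!("ML_HOST")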

When each server is started without Docker, everything works as expected, and sending the image directly to ml_server while the compose stack is running also works. Under compose, the api_server does send the request to ml_server, but with a 0-byte / empty image body, exactly as in the log above. Apart from POSTs carrying the image file, plain requests such as GET work fine from api_server to ml_server, for example:
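# from a remote iex session inside the api_server container;
# the endpoint name is a stand-in for whichever GET route exists
iex> Tesla.get("http://ml_server:8000/some_endpoint")
{:ok, %Tesla.Env{status: 200, ...}}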