From 8d5f482e135687747a4f115c5f17b109a74dce70 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Sat, 21 Jan 2023 12:17:07 -0600 Subject: [PATCH 01/17] first pass of changes --- lib/ecto/adapters/sqlite3/connection.ex | 77 +- lib/ecto/adapters/sqlite3/data_type.ex | 19 +- mix.lock | 6 +- .../adapters/sqlite3/pg_connection_test.exs | 1937 +++++++++++++++++ 4 files changed, 1999 insertions(+), 40 deletions(-) create mode 100644 test/ecto/adapters/sqlite3/pg_connection_test.exs diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index 907f66d..7366b12 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -338,11 +338,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do end end - defp build_explain_query(query, :query_plan) do + def build_explain_query(query, :query_plan) do IO.iodata_to_binary(["EXPLAIN QUERY PLAN ", query]) end - defp build_explain_query(query, :instructions) do + def build_explain_query(query, :instructions) do IO.iodata_to_binary(["EXPLAIN ", query]) end @@ -426,7 +426,13 @@ defmodule Ecto.Adapters.SQLite3.Connection do end @impl true - def execute_ddl({:drop, %Table{} = table, _mode}) do + def execute_ddl({:drop, %Table{} = table, mode}) do + if mode != [] do + IO.warn """ + `#{inspect(mode)}` is not supported for DROP TABLE with SQLite3 \ + DROP TABLE #{table.name} cannot have options set. + """, [] + end execute_ddl({:drop, table}) end @@ -508,7 +514,14 @@ defmodule Ecto.Adapters.SQLite3.Connection do end @impl true - def execute_ddl({:drop, %Index{} = index, _mode}) do + def execute_ddl({:drop, %Index{} = index, mode}) do + + if mode != [] do + IO.warn """ + `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ + DROP INDEX #{index.name} cannot have options set. 
+ """, [] + end execute_ddl({:drop, index}) end @@ -818,15 +831,8 @@ defmodule Ecto.Adapters.SQLite3.Connection do def handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} def distinct(nil, _sources, _query), do: [] - def distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " def distinct(%QueryExpr{expr: false}, _sources, _query), do: [] - - def distinct(%QueryExpr{expr: expression}, _sources, query) - when is_list(expression) do - raise Ecto.QueryError, - query: query, - message: "DISTINCT with multiple columns is not supported by SQLite3" - end + def distinct(%QueryExpr{expr: _}, _sources, _query), do: "DISTINCT " def select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do [ @@ -1064,14 +1070,25 @@ defmodule Ecto.Adapters.SQLite3.Connection do def order_by(%{order_bys: []}, _sources), do: [] def order_by(%{order_bys: order_bys} = query, sources) do + order_bys = Enum.flat_map(order_bys, & &1.expr) + + distinct = Map.get(query, :distinct, nil) + + order_bys = if distinct do + order_by_concat(List.wrap(distinct.expr), order_bys) + else + order_bys + end + [ " ORDER BY " - | intersperse_map(order_bys, ", ", fn %QueryExpr{expr: expression} -> - intersperse_map(expression, ", ", &order_by_expr(&1, sources, query)) - end) + | intersperse_map(order_bys, ", ", &order_by_expr(&1, sources, query)) ] end + defp order_by_concat([head | left], [head | right]), do: [head | order_by_concat(left, right)] + defp order_by_concat(left, right), do: left ++ right + defp order_by_expr({dir, expression}, sources, query) do str = expr(expression, sources, query) @@ -1117,10 +1134,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do Enum.map(combinations, &combination/1) end - defp combination({:union, query}), do: [" UNION ", all(query)] - defp combination({:union_all, query}), do: [" UNION ALL ", all(query)] - defp combination({:except, query}), do: [" EXCEPT ", all(query)] - defp combination({:intersect, query}), do: [" INTERSECT ", all(query)] + defp combination({:union, query}), do: [" UNION (", all(query), ?)] + defp combination({:union_all, query}), do: [" UNION ALL (", all(query), ?)] + defp combination({:except, query}), do: [" EXCEPT (", all(query), ?)] + defp combination({:intersect, query}), do: [" INTERSECT (", all(query), ?)] defp combination({:except_all, query}) do raise Ecto.QueryError, @@ -1297,25 +1314,21 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do [ - "CAST (", - "strftime('%Y-%m-%d %H:%M:%f000Z'", - ",", + "datetime(", expr(datetime, sources, query), ",", interval(count, interval, sources), - ") AS TEXT)" + ")" ] end def expr({:date_add, _, [date, count, interval]}, sources, query) do [ - "CAST (", - "strftime('%Y-%m-%d'", - ",", + "date(", expr(date, sources, query), ",", interval(count, interval, sources), - ") AS TEXT)" + ")" ] end @@ -1392,7 +1405,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) when type in [:decimal, :float] do - ["(", expr(other, sources, query), " + 0)"] + ["CAST(", expr(other, sources, query), " AS REAL)"] end def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do @@ -1427,15 +1440,15 @@ defmodule Ecto.Adapters.SQLite3.Connection do end def interval(count, "millisecond", sources) do - "(#{expr(count, sources, nil)} / 1000.0) || ' seconds'" + "('#{expr(count, sources, nil)} / 1000.0) seconds'" end def interval(count, "week", sources) do - "(#{expr(count, 
sources, nil)} * 7) || ' days'" + "('#{expr(count, sources, nil)} * 7) days'" end def interval(count, interval, sources) do - "#{expr(count, sources, nil)} || ' #{interval}'" + "'#{expr(count, sources, nil)} #{interval}'" end defp op_to_binary({op, _, [_, _]} = expression, sources, query) @@ -1740,7 +1753,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do end) if length(pks) > 1 do - composite_pk_expr = pks |> Enum.reverse() |> Enum.map_join(", ", "e_name/1) + composite_pk_expr = pks |> Enum.reverse() |> Enum.map_join(",", "e_name/1) { %{table | primary_key: :composite}, diff --git a/lib/ecto/adapters/sqlite3/data_type.ex b/lib/ecto/adapters/sqlite3/data_type.ex index 4d84b08..9713d37 100644 --- a/lib/ecto/adapters/sqlite3/data_type.ex +++ b/lib/ecto/adapters/sqlite3/data_type.ex @@ -16,14 +16,17 @@ defmodule Ecto.Adapters.SQLite3.DataType do def column_type(:string, _opts), do: "TEXT" def column_type(:float, _opts), do: "NUMERIC" def column_type(:binary, _opts), do: "BLOB" - def column_type(:map, _opts), do: "JSON" - def column_type(:array, _opts), do: "JSON" - def column_type({:map, _}, _opts), do: "JSON" - def column_type({:array, _}, _opts), do: "JSON" + def column_type(:map, _opts), do: "TEXT" + def column_type(:array, _opts), do: "TEXT" + def column_type({:map, _}, _opts), do: "TEXT" + def column_type({:array, _}, _opts), do: "TEXT" def column_type(:utc_datetime, _opts), do: "TEXT" def column_type(:utc_datetime_usec, _opts), do: "TEXT" def column_type(:naive_datetime, _opts), do: "TEXT" def column_type(:naive_datetime_usec, _opts), do: "TEXT" + def column_type(:time, _opts), do: "TEXT" + def column_type(:time_usec, _opts), do: "TEXT" + def column_type(:timestamp, _opts), do: "TEXT" def column_type(:decimal, nil), do: "DECIMAL" def column_type(:decimal, opts) do @@ -52,9 +55,15 @@ defmodule Ecto.Adapters.SQLite3.DataType do end end - def column_type(type, _) do + def column_type(type, _) when is_atom(type) do type |> Atom.to_string() |> String.upcase() end + + def column_type(type, _) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` or `{:array, t}` where `t` itself follows the same conditions." 
+ end end diff --git a/mix.lock b/mix.lock index 51b85fd..3496f35 100644 --- a/mix.lock +++ b/mix.lock @@ -8,8 +8,8 @@ "decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"}, "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, "earmark_parser": {:hex, :earmark_parser, "1.4.29", "149d50dcb3a93d9f3d6f3ecf18c918fb5a2d3c001b5d3305c926cddfbd33355b", [:mix], [], "hexpm", "4902af1b3eb139016aed210888748db8070b8125c2342ce3dcae4f38dcc63503"}, - "ecto": {:hex, :ecto, "3.9.2", "017db3bc786ff64271108522c01a5d3f6ba0aea5c84912cfb0dd73bf13684108", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "21466d5177e09e55289ac7eade579a642578242c7a3a9f91ad5c6583337a9d15"}, - "ecto_sql": {:hex, :ecto_sql, "3.9.1", "9bd5894eecc53d5b39d0c95180d4466aff00e10679e13a5cfa725f6f85c03c22", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.9.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5fd470a4fff2e829bbf9dcceb7f3f9f6d1e49b4241e802f614de6b8b67c51118"}, + "ecto": {:hex, :ecto, "3.9.4", "3ee68e25dbe0c36f980f1ba5dd41ee0d3eb0873bccae8aeaf1a2647242bffa35", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "de5f988c142a3aa4ec18b85a4ec34a2390b65b24f02385c1144252ff6ff8ee75"}, + "ecto_sql": {:hex, :ecto_sql, "3.9.2", "34227501abe92dba10d9c3495ab6770e75e79b836d114c41108a4bf2ce200ad5", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.9.2", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1eb5eeb4358fdbcd42eac11c1fbd87e3affd7904e639d77903c1358b2abd3f70"}, "elixir_make": {:hex, :elixir_make, "0.6.3", "bc07d53221216838d79e03a8019d0839786703129599e9619f4ab74c8c096eac", [:mix], [], "hexpm", "f5cbd651c5678bcaabdbb7857658ee106b12509cd976c2c2fca99688e1daf716"}, "ex_doc": {:hex, :ex_doc, "0.29.1", "b1c652fa5f92ee9cf15c75271168027f92039b3877094290a75abcaac82a9f77", [:mix], [{:earmark_parser, "~> 1.4.19", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", 
"b7745fa6374a36daf484e2a2012274950e084815b936b1319aeebcf7809574f6"}, "exqlite": {:hex, :exqlite, "0.11.8", "b6bf596caa92d4811c303ddd8544dc9493a5351a18d2d16e1f85f977705cb39f", [:make, :mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "8591959284f1012c0d8cb26a09d5d9ba0bc0559cbeaf26fb8fbef14213d73bc0"}, @@ -22,6 +22,6 @@ "nimble_parsec": {:hex, :nimble_parsec, "1.2.3", "244836e6e3f1200c7f30cb56733fd808744eca61fd182f731eac4af635cc6d0b", [:mix], [], "hexpm", "c8d789e39b9131acf7b99291e93dae60ab48ef14a7ee9d58c6964f59efb570b0"}, "postgrex": {:hex, :postgrex, "0.16.5", "fcc4035cc90e23933c5d69a9cd686e329469446ef7abba2cf70f08e2c4b69810", [:mix], [{:connection, "~> 1.1", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "edead639dc6e882618c01d8fc891214c481ab9a3788dfe38dd5e37fd1d5fb2e8"}, "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, - "telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"}, + "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, "temp": {:hex, :temp, "0.4.7", "2c78482cc2294020a4bc0c95950b907ff386523367d4e63308a252feffbea9f2", [:mix], [], "hexpm", "6af19e7d6a85a427478be1021574d1ae2a1e1b90882586f06bde76c63cd03e0d"}, } diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs new file mode 100644 index 0000000..7b1e7fe --- /dev/null +++ b/test/ecto/adapters/sqlite3/pg_connection_test.exs @@ -0,0 +1,1937 @@ +defmodule Ecto.Adapters.SQLite3Test do + use ExUnit.Case, async: false + + import Ecto.Query + import ExUnit.CaptureIO + + + alias Ecto.Queryable + alias Ecto.Adapters.SQLite3.Connection, as: SQL + + defmodule Schema do + use Ecto.Schema + + schema "schema" do + field :x, :integer + field :y, :integer + field :z, :integer + field :w, {:array, :integer} + field :meta, :map + + has_many :comments, Ecto.Adapters.SQLite3Test.Schema2, + references: :x, + foreign_key: :z + has_one :permalink, Ecto.Adapters.SQLite3Test.Schema3, + references: :y, + foreign_key: :id + end + end + + defmodule Schema2 do + use Ecto.Schema + + schema "schema2" do + belongs_to :post, Ecto.Adapters.SQLite3Test.Schema, + references: :x, + foreign_key: :z + end + end + + defmodule Schema3 do + use Ecto.Schema + + schema "schema3" do + field :list1, {:array, :string} + field :list2, {:array, :integer} + field :binary, :binary + end + end + + defp plan(query, operation \\ :all) do + {query, _cast_params, _dump_params} = Ecto.Adapter.Queryable.plan_query(operation, Ecto.Adapters.SQLite3, query) + query + end + + defp all(query), do: query |> SQL.all |> IO.iodata_to_binary() + defp update_all(query), do: query |> SQL.update_all 
|> IO.iodata_to_binary() + defp delete_all(query), do: query |> SQL.delete_all |> IO.iodata_to_binary() + defp execute_ddl(query), do: query |> SQL.execute_ddl |> Enum.map(&IO.iodata_to_binary/1) + + defp insert(prefx, table, header, rows, on_conflict, returning, placeholders \\ []) do + IO.iodata_to_binary( + SQL.insert(prefx, table, header, rows, on_conflict, returning, placeholders) + ) + end + + defp update(prefx, table, fields, filter, returning) do + IO.iodata_to_binary(SQL.update(prefx, table, fields, filter, returning)) + end + + defp delete(prefx, table, filter, returning) do + IO.iodata_to_binary(SQL.delete(prefx, table, filter, returning)) + end + + test "from" do + query = Schema |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} + end + + test "from with hints" do + query = Schema |> from(hints: "INDEXED BY FOO") |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 INDEXED BY FOO} + end + + test "from without schema" do + query = "posts" |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT p0."x" FROM "posts" AS p0} + + query = "posts" |> select([r], fragment("?", r)) |> plan() + assert all(query) == ~s{SELECT p0 FROM "posts" AS p0} + + query = "Posts" |> select([:x]) |> plan() + assert all(query) == ~s{SELECT P0."x" FROM "Posts" AS P0} + + query = "0posts" |> select([:x]) |> plan() + assert all(query) == ~s{SELECT t0."x" FROM "0posts" AS t0} + + assert_raise Ecto.QueryError, ~r"SQLite3 does not support selecting all fields from \"posts\" without a schema", fn -> + all from(p in "posts", select: p) |> plan() + end + end + + test "from with subquery" do + query = subquery("posts" |> select([r], %{x: r.x, y: r.y})) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM (SELECT sp0."x" AS "x", sp0."y" AS "y" FROM "posts" AS sp0) AS s0} + + query = subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."z" FROM (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0) AS s0} + + query = subquery(subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r)) |> select([r], r) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."z" FROM (SELECT ss0."x" AS "x", ss0."z" AS "z" FROM (SELECT ssp0."x" AS "x", ssp0."y" AS "z" FROM "posts" AS ssp0) AS ss0) AS s0} + end + + test "from with fragment" do + query = from(f in fragment("select ? as x", ^"abc"), select: f.x) |> plan() + assert all(query) == ~s{SELECT f0."x" FROM (select ? as x) AS f0} + + query = from(fragment("select ? as x", ^"abc"), select: fragment("x")) |> plan() + assert all(query) == ~s{SELECT x FROM (select ? as x) AS f0} + + query = from(f in fragment("select_rows(arg)"), select: f.x) |> plan() + assert all(query) == ~s{SELECT f0."x" FROM (select_rows(arg)) AS f0} + + assert_raise Ecto.QueryError, ~r/^SQLite3 does not support/, fn -> + all from(f in fragment("select ? as x", ^"abc"), select: f) |> plan() + end + end + + test "CTE" do + initial_query = + "categories" + |> where([c], is_nil(c.parent_id)) + |> select([c], %{id: c.id, depth: fragment("1")}) + + iteration_query = + "categories" + |> join(:inner, [c], t in "tree", on: t.id == c.parent_id) + |> select([c, t], %{id: c.id, depth: fragment("? 
+ 1", t.depth)}) + + cte_query = initial_query |> union_all(^iteration_query) + + query = + Schema + |> recursive_ctes(true) + |> with_cte("tree", as: ^cte_query) + |> join(:inner, [r], t in "tree", on: t.id == r.category_id) + |> select([r, t], %{x: r.x, category_id: t.id, depth: type(t.depth, :integer)}) + |> plan() + + assert all(query) == + ~s{WITH RECURSIVE "tree" AS } <> + ~s{(SELECT sc0."id" AS "id", 1 AS "depth" FROM "categories" AS sc0 WHERE (sc0."parent_id" IS NULL) } <> + ~s{UNION ALL } <> + ~s{(SELECT c0."id", t1."depth" + 1 FROM "categories" AS c0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = c0."parent_id")) } <> + ~s{SELECT s0."x", t1."id", CAST(t1."depth" AS INTEGER) } <> + ~s{FROM "schema" AS s0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} + end + + @raw_sql_cte """ + SELECT * FROM categories WHERE c.parent_id IS NULL + UNION ALL + SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id + """ + + test "reference CTE in union" do + comments_scope_query = + "comments" + |> where([c], is_nil(c.deleted_at)) + |> select([c], %{entity_id: c.entity_id, text: c.text}) + + posts_query = + "posts" + |> join(:inner, [p], c in "comments_scope", on: c.entity_id == p.guid) + |> select([p, c], [p.title, c.text]) + + videos_query = + "videos" + |> join(:inner, [v], c in "comments_scope", on: c.entity_id == v.guid) + |> select([v, c], [v.title, c.text]) + + query = + posts_query + |> union_all(^videos_query) + |> with_cte("comments_scope", as: ^comments_scope_query) + |> plan() + + assert all(query) == + ~s{WITH "comments_scope" AS (} <> + ~s{SELECT sc0."entity_id" AS "entity_id", sc0."text" AS "text" } <> + ~s{FROM "comments" AS sc0 WHERE (sc0."deleted_at" IS NULL)) } <> + ~s{SELECT p0."title", c1."text" } <> + ~s{FROM "posts" AS p0 } <> + ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = p0."guid" } <> + ~s{UNION ALL } <> + ~s{(SELECT v0."title", c1."text" } <> + ~s{FROM "videos" AS v0 } <> + ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = v0."guid")} + end + + test "fragment CTE" do + query = + Schema + |> recursive_ctes(true) + |> with_cte("tree", as: fragment(@raw_sql_cte)) + |> join(:inner, [p], c in "tree", on: c.id == p.category_id) + |> select([r], r.x) + |> plan() + + assert all(query) == + ~s{WITH RECURSIVE "tree" AS (#{@raw_sql_cte}) } <> + ~s{SELECT s0."x" } <> + ~s{FROM "schema" AS s0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} + end + + # TODO should we warn about locks? or yell? 
+ test "CTE update_all" do + cte_query = + from(x in Schema, order_by: [asc: :id], limit: 10, lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id}) + + query = + Schema + |> with_cte("target_rows", as: ^cte_query) + |> join(:inner, [row], target in "target_rows", on: target.id == row.id) + |> select([r, t], r) + |> update(set: [x: 123]) + |> plan(:update_all) + + assert update_all(query) == + ~s{WITH "target_rows" AS } <> + ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10) } <> + ~s{UPDATE "schema" AS s0 } <> + ~s{SET "x" = 123 } <> + ~s{FROM "target_rows" AS t1 } <> + ~s{WHERE (t1."id" = s0."id") } <> + ~s{RETURNING "id", "x", "y", "z", "w", "meta"} + end + + # TODO Joins not supported with SQLite + """ + test "CTE delete_all" do + cte_query = + from(x in Schema, order_by: [asc: :id], limit: 10, lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id}) + + query = + Schema + |> with_cte("target_rows", as: ^cte_query) + |> join(:inner, [row], target in "target_rows", on: target.id == row.id) + |> select([r, t], r) + |> plan(:delete_all) + + assert delete_all(query) == + ~s{WITH "target_rows" AS } <> + ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10 FOR UPDATE SKIP LOCKED) } <> + ~s{DELETE FROM "schema" AS s0 } <> + ~s{USING "target_rows" AS t1 } <> + ~s{WHERE (t1."id" = s0."id") } <> + ~s{RETURNING "id", "x", "y", "z", "w", "meta"} + end + """ + + test "parent binding subquery and CTE" do + initial_query = + "categories" + |> where([c], c.id == parent_as(:parent_category).id) + |> select([:id, :parent_id]) + + iteration_query = + "categories" + |> join(:inner, [c], t in "tree", on: t.parent_id == c.id) + |> select([:id, :parent_id]) + + cte_query = initial_query |> union_all(^iteration_query) + + breadcrumbs_query = + "tree" + |> recursive_ctes(true) + |> with_cte("tree", as: ^cte_query) + |> select([t], %{breadcrumbs: fragment("STRING_AGG(?, ' / ')", t.id)}) + + query = + from(c in "categories", + as: :parent_category, + left_lateral_join: b in subquery(breadcrumbs_query), + select: %{id: c.id, breadcrumbs: b.breadcrumbs} + ) + |> plan() + + assert_raise Ecto.QueryError, ~r/join `:left_lateral` not supported by SQLite3/, fn -> + all(query) + end + end + + test "select" do + query = Schema |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> select([r], [r.x, r.y]) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> select([r], struct(r, [:x, :y])) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + end + + test "aggregates" do + query = Schema |> select([r], count(r.x)) |> plan() + assert all(query) == ~s{SELECT count(s0."x") FROM "schema" AS s0} + + query = Schema |> select([r], count(r.x, :distinct)) |> plan() + assert all(query) == ~s{SELECT count(DISTINCT s0."x") FROM "schema" AS s0} + + query = Schema |> select([r], count()) |> plan() + assert all(query) == ~s{SELECT count(*) FROM "schema" AS s0} + end + + test "aggregate filters" do + query = Schema |> select([r], count(r.x) |> filter(r.x > 10)) |> plan() + assert all(query) == ~s{SELECT count(s0."x") FILTER (WHERE s0."x" > 10) FROM "schema" AS s0} + + query = Schema |> select([r], count(r.x) |> filter(r.x > 10 and r.x < 50)) |> plan() + assert all(query) == ~s{SELECT count(s0."x") FILTER (WHERE (s0."x" > 10) AND (s0."x" < 50)) FROM "schema" AS s0} + + query = Schema |> select([r], count() |> filter(r.x > 10)) |> plan() + assert 
all(query) == ~s{SELECT count(*) FILTER (WHERE s0."x" > 10) FROM "schema" AS s0} + end + + test "distinct" do + query = Schema |> distinct([r], r.x) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct([r], desc: r.x) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct([r], 2) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0} + + query = Schema |> distinct([r], [r.x, r.y]) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct([r], [asc: r.x, desc: r.y]) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct([r], [asc_nulls_first: r.x, desc_nulls_last: r.y]) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct([r], true) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct([r], false) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct(true) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} + + query = Schema |> distinct(false) |> select([r], {r.x, r.y}) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + end + + test "distinct with order by" do + query = Schema |> order_by([r], [r.y]) |> distinct([r], desc: r.x) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC, s0."y"} + + query = Schema |> order_by([r], [r.y]) |> distinct([r], desc_nulls_last: r.x) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC NULLS LAST, s0."y"} + + # Duplicates + query = Schema |> order_by([r], desc: r.x) |> distinct([r], desc: r.x) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC} + + assert Schema + |> order_by([r], desc: r.x) + |> distinct([r], desc: r.x) + |> select([r], r.x) + |> plan() + |> all() == + ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC} + end + + test "coalesce" do + query = Schema |> select([s], coalesce(s.x, 5)) |> plan() + assert all(query) == ~s{SELECT coalesce(s0."x", 5) FROM "schema" AS s0} + end + + test "where" do + query = Schema |> where([r], r.x == 42) |> where([r], r.y != 43) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE (s0."x" = 42) AND (s0."y" != 43)} + + query = Schema |> where([r], {r.x, r.y} > {1, 2}) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE ((s0."x",s0."y") > (1,2))} + end + + test "or_where" do + query = Schema |> or_where([r], r.x == 42) |> or_where([r], r.y != 43) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE (s0."x" = 42) OR (s0."y" != 43)} + + query = Schema |> or_where([r], r.x == 42) |> or_where([r], r.y != 43) |> where([r], r.z == 44) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 
WHERE ((s0."x" = 42) OR (s0."y" != 43)) AND (s0."z" = 44)} + end + + test "order by" do + query = Schema |> order_by([r], r.x) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x"} + + query = Schema |> order_by([r], [r.x, r.y]) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x", s0."y"} + + query = Schema |> order_by([r], [asc: r.x, desc: r.y]) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x", s0."y" DESC} + + query = Schema |> order_by([r], [asc_nulls_first: r.x, desc_nulls_first: r.y]) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x" ASC NULLS FIRST, s0."y" DESC NULLS FIRST} + + query = Schema |> order_by([r], [asc_nulls_last: r.x, desc_nulls_last: r.y]) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x" ASC NULLS LAST, s0."y" DESC NULLS LAST} + + query = Schema |> order_by([r], []) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} + end + + test "union and union all" do + base_query = Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) + union_query1 = Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) + union_query2 = Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) + + query = base_query |> union(^union_query1) |> union(^union_query2) |> plan() + + assert all(query) == + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{UNION (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{UNION (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} + + query = base_query |> union_all(^union_query1) |> union_all(^union_query2) |> plan() + + assert all(query) == + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{UNION ALL (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{UNION ALL (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} + end + + test "except and except all" do + base_query = Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) + except_query1 = Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) + except_query2 = Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) + + query = base_query |> except(^except_query1) |> except(^except_query2) |> plan() + + assert all(query) == + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{EXCEPT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{EXCEPT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} + + query = base_query |> except_all(^except_query1) |> except_all(^except_query2) |> plan() + + assert_raise Ecto.QueryError, fn -> + all(query) + end + end + + test "intersect and intersect all" do + base_query = Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) + intersect_query1 = Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) + intersect_query2 = Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) + + query = base_query |> intersect(^intersect_query1) |> intersect(^intersect_query2) |> plan() + + assert all(query) == + ~s{SELECT 
s0."x" FROM "schema" AS s0 } <> + ~s{INTERSECT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{INTERSECT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} + + query = + base_query |> intersect_all(^intersect_query1) |> intersect_all(^intersect_query2) |> plan() + + assert_raise Ecto.QueryError, fn -> + all(query) + end + end + + test "limit and offset" do + query = Schema |> limit([r], 3) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3} + + query = Schema |> offset([r], 5) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 OFFSET 5} + + query = Schema |> offset([r], 5) |> limit([r], 3) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3 OFFSET 5} + end + + test "lock" do + assert_raise ArgumentError, "locks are not supported by SQLite3", fn -> + Schema |> lock("FOR SHARE NOWAIT") |> select([], true) |> plan() |> all() + end + + assert_raise ArgumentError, "locks are not supported by SQLite3", fn -> + Schema |> lock([p], fragment("UPDATE on ?", p)) |> select([], true) |> plan() |> all() + end + + end + + test "string escape" do + query = "schema" |> where(foo: "'\\ ") |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM \"schema\" AS s0 WHERE (s0.\"foo\" = '''\\\\ ')} + + query = "schema" |> where(foo: "'") |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = '''')} + end + + test "binary ops" do + query = Schema |> select([r], r.x == 2) |> plan() + assert all(query) == ~s{SELECT s0."x" = 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x != 2) |> plan() + assert all(query) == ~s{SELECT s0."x" != 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x <= 2) |> plan() + assert all(query) == ~s{SELECT s0."x" <= 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x >= 2) |> plan() + assert all(query) == ~s{SELECT s0."x" >= 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x < 2) |> plan() + assert all(query) == ~s{SELECT s0."x" < 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x > 2) |> plan() + assert all(query) == ~s{SELECT s0."x" > 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x + 2) |> plan() + assert all(query) == ~s{SELECT s0."x" + 2 FROM "schema" AS s0} + end + + test "is_nil" do + query = Schema |> select([r], is_nil(r.x)) |> plan() + assert all(query) == ~s{SELECT s0."x" IS NULL FROM "schema" AS s0} + + query = Schema |> select([r], not is_nil(r.x)) |> plan() + assert all(query) == ~s{SELECT NOT (s0."x" IS NULL) FROM "schema" AS s0} + + query = "schema" |> select([r], r.x == is_nil(r.y)) |> plan() + assert all(query) == ~s{SELECT s0."x" = (s0."y" IS NULL) FROM "schema" AS s0} + end + + test "fragments" do + query = Schema |> select([r], fragment("now")) |> plan() + assert all(query) == ~s{SELECT now FROM "schema" AS s0} + + query = Schema |> select([r], fragment("fun(?)", r)) |> plan() + assert all(query) == ~s{SELECT fun(s0) FROM "schema" AS s0} + + query = Schema |> select([r], fragment("downcase(?)", r.x)) |> plan() + assert all(query) == ~s{SELECT downcase(s0."x") FROM "schema" AS s0} + + query = Schema |> select([r], fragment("? 
COLLATE ?", r.x, literal(^"es_ES"))) |> plan() + assert all(query) == ~s{SELECT s0."x" COLLATE "es_ES" FROM "schema" AS s0} + + value = 13 + query = Schema |> select([r], fragment("downcase(?, ?)", r.x, ^value)) |> plan() + assert all(query) == ~s{SELECT downcase(s0."x", ?) FROM "schema" AS s0} + + query = Schema |> select([], fragment(title: 2)) |> plan() + assert_raise Ecto.QueryError, fn -> + all(query) + end + end + + test "literals" do + query = "schema" |> where(foo: true) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 1)} + + query = "schema" |> where(foo: false) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 0)} + + query = "schema" |> where(foo: "abc") |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 'abc')} + + query = "schema" |> where(foo: <<0,?a,?b,?c>>) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = x'00616263')} + + query = "schema" |> where(foo: 123) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 123)} + + query = "schema" |> where(foo: 123.0) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = CAST(123.0 AS REAL))} + end + + test "aliasing a selected value with selected_as/2" do + query = "schema" |> select([s], selected_as(s.x, :integer)) |> plan() + assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0} + + query = "schema" |> select([s], s.x |> coalesce(0) |> sum() |> selected_as(:integer)) |> plan() + assert all(query) == ~s{SELECT sum(coalesce(s0."x", 0)) AS "integer" FROM "schema" AS s0} + end + + test "group_by can reference the alias of a selected value with selected_as/1" do + query = "schema" |> select([s], selected_as(s.x, :integer)) |> group_by(selected_as(:integer)) |> plan() + assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 GROUP BY "integer"} + end + + test "order_by can reference the alias of a selected value with selected_as/1" do + query = "schema" |> select([s], selected_as(s.x, :integer)) |> order_by(selected_as(:integer)) |> plan() + assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 ORDER BY "integer"} + + query = "schema" |> select([s], selected_as(s.x, :integer)) |> order_by([desc: selected_as(:integer)]) |> plan() + assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 ORDER BY "integer" DESC} + end + + test "datetime_add" do + query = "schema" |> where([s], datetime_add(s.foo, 1, "month") > s.bar) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(s0."foo",'1 month') > s0."bar")} + + query = "schema" |> where([s], datetime_add(type(s.foo, :string), 1, "month") > s.bar) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(CAST(s0."foo" AS TEXT),'1 month') > s0."bar")} + end + + test "tagged type" do + query = Schema |> select([t], type(t.x + t.y, :integer)) |> plan() + assert all(query) == ~s{SELECT CAST(s0."x" + s0."y" AS INTEGER) FROM "schema" AS s0} + + query = Schema |> select([], type(^"601d74e4-a8d3-4b6e-8365-eddb4c893327", Ecto.UUID)) |> plan() + assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} + + # TODO Arrays not supported? 
+ query = Schema |> select([], type(^["601d74e4-a8d3-4b6e-8365-eddb4c893327"], {:array, Ecto.UUID})) |> plan() + assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} + end + + test "json_extract_path" do + query = Schema |> select([s], json_extract_path(s.meta, [0, 1])) |> plan() + assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$[0][1]') FROM "schema" AS s0| + + query = Schema |> select([s], json_extract_path(s.meta, ["a", "b"])) |> plan() + assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$.a.b') FROM "schema" AS s0| + + query = Schema |> select([s], json_extract_path(s.meta, ["'a"])) |> plan() + assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$.''a') FROM "schema" AS s0| + + query = Schema |> select([s], json_extract_path(s.meta, ["\"a"])) |> plan() + assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$.\\\"a') FROM "schema" AS s0| + end + + test "optimized json_extract_path" do + query = Schema |> where([s], s.meta["id"] == 123) |> select(true) |> plan() + assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0.\"meta\", '$.id') = 123)| + + query = Schema |> where([s], s.meta["id"] == "123") |> select(true) |> plan() + assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.id') = '123')| + + query = Schema |> where([s], s.meta["tags"][0]["name"] == "123") |> select(true) |> plan() + assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.tags[0].name') = '123')| + + query = Schema |> where([s], s.meta[0] == "123") |> select(true) |> plan() + assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0.\"meta\", '$[0]') = '123')| + + query = Schema |> where([s], s.meta["enabled"] == true) |> select(true) |> plan() + assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.enabled') = 1)| + + query = Schema |> where([s], s.meta["extra"][0]["enabled"] == false) |> select(true) |> plan() + assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.extra[0].enabled') = 0)| + end + + test "nested expressions" do + z = 123 + query = from(r in Schema, []) |> select([r], r.x > 0 and (r.y > ^(-z)) or true) |> plan() + assert all(query) == ~s{SELECT ((s0."x" > 0) AND (s0."y" > ?)) OR 1 FROM "schema" AS s0} + end + + test "in expression" do + query = Schema |> select([e], 1 in []) |> plan() + assert all(query) == ~s{SELECT false FROM "schema" AS s0} + + query = Schema |> select([e], 1 in [1,e.x,3]) |> plan() + assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in ^[]) |> plan() + assert all(query) == ~s{SELECT 1 = ANY(?) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in ^[1, 2, 3]) |> plan() + assert all(query) == ~s{SELECT 1 = ANY(?) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in [1, ^2, 3]) |> plan() + assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM "schema" AS s0} + + query = Schema |> select([e], ^1 in [1, ^2, 3]) |> plan() + assert all(query) == ~s{SELECT ? IN (1,?,3) FROM "schema" AS s0} + + query = Schema |> select([e], ^1 in ^[1, 2, 3]) |> plan() + assert all(query) == ~s{SELECT ? = ANY(?) 
FROM "schema" AS s0} + + query = Schema |> select([e], 1 in e.w) |> plan() + assert all(query) == ~s{SELECT 1 = ANY(s0."w") FROM "schema" AS s0} + + query = Schema |> select([e], 1 in fragment("foo")) |> plan() + assert all(query) == ~s{SELECT 1 = ANY(foo) FROM "schema" AS s0} + + query = Schema |> select([e], e.x == ^0 or e.x in ^[1, 2, 3] or e.x == ^4) |> plan() + assert all(query) == ~s{SELECT ((s0."x" = ?) OR s0."x" = ANY(?)) OR (s0."x" = ?) FROM "schema" AS s0} + end + + test "in subquery" do + posts = subquery("posts" |> where(title: ^"hello") |> select([p], p.id)) + query = "comments" |> where([c], c.post_id in subquery(posts)) |> select([c], c.x) |> plan() + assert all(query) == + ~s{SELECT c0."x" FROM "comments" AS c0 } <> + ~s{WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = ?)))} + + posts = subquery("posts" |> where(title: parent_as(:comment).subtitle) |> select([p], p.id)) + query = "comments" |> from(as: :comment) |> where([c], c.post_id in subquery(posts)) |> select([c], c.x) |> plan() + assert all(query) == + ~s{SELECT c0."x" FROM "comments" AS c0 } <> + ~s{WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")))} + end + + test "having" do + query = Schema |> having([p], p.x == p.x) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} + + query = Schema |> having([p], p.x == p.x) |> having([p], p.y == p.y) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") AND (s0."y" = s0."y")} + end + + test "or_having" do + query = Schema |> or_having([p], p.x == p.x) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} + + query = Schema |> or_having([p], p.x == p.x) |> or_having([p], p.y == p.y) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") OR (s0."y" = s0."y")} + end + + test "group by" do + query = Schema |> group_by([r], r.x) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x"} + + query = Schema |> group_by([r], 2) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY 2} + + query = Schema |> group_by([r], [r.x, r.y]) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x", s0."y"} + + query = Schema |> group_by([r], []) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} + end + + + # TODO Add Array support via json + """ + test "arrays and sigils" do + query = Schema |> select([], fragment("?", [1, 2, 3])) |> plan() + assert all(query) == ~s{SELECT JSON_ARRAY('[1,2,3]') FROM "schema" AS s0} + + query = Schema |> select([], fragment("?", ~w(abc def))) |> plan() + assert all(query) == ~s{SELECT JSON_ARRAY('["abc","def"]') FROM "schema" AS s0} + + query = Schema |> where([s], s.w == []) |> select([s], s.w) |> plan() + assert all(query) == ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = '\{\}')} + end + """ + + + test "interpolated values" do + cte1 = "schema1" |> select([m], %{id: m.id, smth: ^true}) |> where([], fragment("?", ^1)) + union = "schema1" |> select([m], {m.id, ^true}) |> where([], fragment("?", ^5)) + union_all = "schema2" |> select([m], {m.id, ^false}) |> where([], fragment("?", ^6)) + + query = "schema" + |> with_cte("cte1", as: ^cte1) + |> with_cte("cte2", as: 
fragment("SELECT * FROM schema WHERE ?", ^2)) + |> select([m], {m.id, ^true}) + |> join(:inner, [], Schema2, on: fragment("?", ^true)) + |> join(:inner, [], Schema2, on: fragment("?", ^false)) + |> where([], fragment("?", ^true)) + |> where([], fragment("?", ^false)) + |> having([], fragment("?", ^true)) + |> having([], fragment("?", ^false)) + |> group_by([], fragment("?", ^3)) + |> group_by([], fragment("?", ^4)) + |> union(^union) + |> union_all(^union_all) + |> order_by([], fragment("?", ^7)) + |> limit([], ^8) + |> offset([], ^9) + |> plan() + + result = + "WITH \"cte1\" AS (SELECT ss0.\"id\" AS \"id\", ? AS \"smth\" FROM \"schema1\" AS ss0 WHERE (?)), " <> + "\"cte2\" AS (SELECT * FROM schema WHERE ?) " <> + "SELECT s0.\"id\", ? FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON ? " <> + "INNER JOIN \"schema2\" AS s2 ON ? WHERE (?) AND (?) " <> + "GROUP BY ?, ? HAVING (?) AND (?) " <> + "UNION (SELECT s0.\"id\", ? FROM \"schema1\" AS s0 WHERE (?)) " <> + "UNION ALL (SELECT s0.\"id\", ? FROM \"schema2\" AS s0 WHERE (?)) " <> + "ORDER BY ? LIMIT ? OFFSET ?" + + assert all(query) == String.trim(result) + end + + test "order_by and types" do + query = "schema3" |> order_by([e], type(fragment("?", e.binary), ^:decimal)) |> select(true) |> plan() + assert all(query) == "SELECT 1 FROM \"schema3\" AS s0 ORDER BY CAST(s0.\"binary\" AS REAL)" + end + + test "fragments and types" do + query = + plan from(e in "schema", + where: fragment("extract(? from ?) = ?", ^"month", e.start_time, type(^"4", :integer)), + where: fragment("extract(? from ?) = ?", ^"year", e.start_time, type(^"2015", :integer)), + select: true) + + result = + "SELECT 1 FROM \"schema\" AS s0 " <> + "WHERE (extract(? from s0.\"start_time\") = CAST(? AS INTEGER)) " <> + "AND (extract(? from s0.\"start_time\") = CAST(? AS INTEGER))" + + assert all(query) == String.trim(result) + end + + test "fragments allow ? to be escaped with backslash" do + query = + plan from(e in "schema", + where: fragment("? 
= \"query\\?\"", e.start_time), + select: true) + + result = + "SELECT 1 FROM \"schema\" AS s0 " <> + "WHERE (s0.\"start_time\" = \"query?\")" + + assert all(query) == String.trim(result) + end + + test "build_explain_query" do + assert SQL.build_explain_query("SELECT 1", :query_plan) == "EXPLAIN QUERY PLAN SELECT 1" + assert SQL.build_explain_query("SELECT 1", :instructions) == "EXPLAIN SELECT 1" + end + + ## *_all + + test "update all" do + query = from(m in Schema, update: [set: [x: 0]]) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0} + + query = from(m in Schema, update: [set: [x: 0], inc: [y: 1, z: -3]]) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0, "y" = "y" + 1, "z" = "z" + -3} + + query = from(e in Schema, where: e.x == 123, update: [set: [x: 0]]) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0 WHERE (s0."x" = 123)} + + query = from(m in Schema, update: [set: [x: ^0]]) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = ?} + + query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) + |> update([_], set: [x: 0]) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0 FROM "schema2" AS s1 WHERE (s0."x" = s1."z")} + + query = from(e in Schema, where: e.x == 123, update: [set: [x: 0]], + join: q in Schema2, on: e.x == q.z) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0 FROM "schema2" AS s1 } <> + ~s{WHERE (s0."x" = s1."z") AND (s0."x" = 123)} + end + + test "update all with returning" do + query = from(m in Schema, update: [set: [x: 0]]) |> select([m], m) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0 RETURNING "id", "x", "y", "z", "w", "meta"} + + query = from(m in Schema, update: [set: [x: ^1]]) |> where([m], m.x == ^2) |> select([m], m.x == ^3) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = ? WHERE (s0."x" = ?) RETURNING "x" = ?} + end + + # TODO We don;t have actual arrays so probably no? + test "update all array ops" do + query = from(m in Schema, update: [push: [w: 0]]) |> plan(:update_all) + assert_raise Ecto.QueryError, fn -> + update_all(query) + end + + query = from(m in Schema, update: [pull: [w: 0]]) |> plan(:update_all) + assert_raise Ecto.QueryError, fn -> + update_all(query) + end + end + + test "update all with subquery" do + sub = from(p in Schema, where: p.x > ^10) + + query = + Schema + |> join(:inner, [p], p2 in subquery(sub), on: p.id == p2.id) + |> update([_], set: [x: ^100]) + + {planned_query, cast_params, dump_params} = + Ecto.Adapter.Queryable.plan_query(:update_all, Ecto.Adapters.SQLite3, query) + + assert update_all(planned_query) == + ~s{UPDATE "schema" AS s0 SET "x" = ? 
FROM } <> + ~s{(SELECT ss0."id" AS "id", ss0."x" AS "x", ss0."y" AS "y", ss0."z" AS "z", ss0."w" AS "w", ss0."meta" AS "meta" FROM "schema" AS ss0 WHERE (ss0."x" > ?)) } <> + ~s{AS s1 WHERE (s0."id" = s1."id")} + + assert cast_params == [100, 10] + assert dump_params == [100, 10] + end + + test "update all with prefix" do + query = from(m in Schema, update: [set: [x: 0]]) |> Map.put(:prefix, "prefix") |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "prefix"."schema" AS s0 SET "x" = 0} + + query = from(m in Schema, prefix: "first", update: [set: [x: 0]]) |> Map.put(:prefix, "prefix") |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} + end + + # TODO should raise? + """ + test "update all with left join" do + query = from(m in Schema, join: x in assoc(m, :comments), left_join: p in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "w" = s0."list2" FROM "schema2" AS s1 LEFT OUTER JOIN "schema3" AS s2 ON s2."id" = s0."y" WHERE (s1."z" = s0."x")} + end + + test "update all with left join but no inner join" do + query = from(m in Schema, left_join: p in assoc(m, :permalink), left_join: x in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) + assert update_all(query) == ~s{select} + end + """ + + test "delete all" do + query = Schema |> Queryable.to_query |> plan() + assert delete_all(query) == ~s{DELETE FROM "schema" AS s0} + + query = from(e in Schema, where: e.x == 123) |> plan() + assert delete_all(query) == + ~s{DELETE FROM "schema" AS s0 WHERE (s0."x" = 123)} + + query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> plan() + assert_raise ArgumentError, fn -> + delete_all(query) + end + + query = from(e in Schema, where: e.x == 123, join: q in Schema2, on: e.x == q.z) |> plan() + assert_raise ArgumentError, fn -> + delete_all(query) + end + + query = from(e in Schema, where: e.x == 123, join: assoc(e, :comments), join: assoc(e, :permalink)) |> plan() + assert_raise ArgumentError, fn -> + delete_all(query) + end + end + + test "delete all with returning" do + query = Schema |> Queryable.to_query |> select([m], m) |> plan() + assert delete_all(query) == ~s{DELETE FROM "schema" AS s0 RETURNING "id", "x", "y", "z", "w", "meta"} + end + + test "delete all with prefix" do + query = Schema |> Queryable.to_query |> Map.put(:prefix, "prefix") |> plan() + assert delete_all(query) == ~s{DELETE FROM "prefix"."schema" AS s0} + + query = Schema |> from(prefix: "first") |> Map.put(:prefix, "prefix") |> plan() + assert delete_all(query) == ~s{DELETE FROM "first"."schema" AS s0} + end + + ## Partitions and windows + + describe "windows and partitions" do + test "one window" do + query = Schema + |> select([r], r.x) + |> windows([r], w: [partition_by: r.x]) + |> plan + + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")} + end + + test "two windows" do + query = Schema + |> select([r], r.x) + |> windows([r], w1: [partition_by: r.x], w2: [partition_by: r.y]) + |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w1" AS (PARTITION BY s0."x"), "w2" AS (PARTITION BY s0."y")} + end + + test "count over window" do + query = Schema + |> windows([r], w: [partition_by: r.x]) + |> select([r], count(r.x) |> over(:w)) + |> plan() + assert all(query) == ~s{SELECT count(s0."x") OVER "w" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")} + end + + test "count over all" 
do + query = Schema + |> select([r], count(r.x) |> over) + |> plan() + assert all(query) == ~s{SELECT count(s0."x") OVER () FROM "schema" AS s0} + end + + test "row_number over all" do + query = Schema + |> select(row_number |> over) + |> plan() + assert all(query) == ~s{SELECT row_number() OVER () FROM "schema" AS s0} + end + + test "nth_value over all" do + query = Schema + |> select([r], nth_value(r.x, 42) |> over) + |> plan() + assert all(query) == ~s{SELECT nth_value(s0."x", 42) OVER () FROM "schema" AS s0} + end + + test "lag/2 over all" do + query = Schema + |> select([r], lag(r.x, 42) |> over) + |> plan() + assert all(query) == ~s{SELECT lag(s0."x", 42) OVER () FROM "schema" AS s0} + end + + test "custom aggregation over all" do + query = Schema + |> select([r], fragment("custom_function(?)", r.x) |> over) + |> plan() + assert all(query) == ~s{SELECT custom_function(s0."x") OVER () FROM "schema" AS s0} + end + + test "partition by and order by on window" do + query = Schema + |> windows([r], w: [partition_by: [r.x, r.z], order_by: r.x]) + |> select([r], r.x) + |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x", s0."z" ORDER BY s0."x")} + end + + test "partition by ond order by over" do + query = Schema + |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x)) + + query = query |> plan() + assert all(query) == ~s{SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x") FROM "schema" AS s0} + end + + test "frame clause" do + query = Schema + |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x, frame: fragment("ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING"))) + + query = query |> plan() + assert all(query) == ~s{SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) FROM "schema" AS s0} + end + end + + ## Joins + + test "join" do + query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> select([], true) |> plan() + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z"} + + query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) + |> join(:inner, [], Schema, on: true) |> select([], true) |> plan() + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z" } <> + ~s{INNER JOIN "schema" AS s2 ON 1} + end + + #TODO ahh probably should raise + """ + test "join with hints" do + assert_raise Ecto.QueryError, ~r/table hints are not supported by PostgreSQL/, fn -> + Schema + |> join(:inner, [p], q in Schema2, hints: ["USE INDEX FOO", "USE INDEX BAR"]) + |> select([], true) + |> plan() + |> all() + end + end + """ + + test "join with nothing bound" do + query = Schema |> join(:inner, [], q in Schema2, on: q.z == q.z) |> select([], true) |> plan() + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s1."z" = s1."z"} + end + + test "join without schema" do + query = "posts" |> join(:inner, [p], q in "comments", on: p.x == q.z) |> select([], true) |> plan() + assert all(query) == + ~s{SELECT 1 FROM "posts" AS p0 INNER JOIN "comments" AS c1 ON p0."x" = c1."z"} + end + + test "join with subquery" do + posts = subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, y: r.y})) + query = "comments" |> join(:inner, [c], p in subquery(posts), on: true) |> select([_, p], p.x) |> plan() + assert all(query) == + ~s{SELECT s1."x" FROM "comments" AS c0 } <> + ~s{INNER JOIN (SELECT sp0."x" AS 
"x", sp0."y" AS "y" FROM "posts" AS sp0 WHERE (sp0."title" = ?)) AS s1 ON 1} + + posts = subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, z: r.y})) + query = "comments" |> join(:inner, [c], p in subquery(posts), on: true) |> select([_, p], p) |> plan() + assert all(query) == + ~s{SELECT s1."x", s1."z" FROM "comments" AS c0 } <> + ~s{INNER JOIN (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0 WHERE (sp0."title" = ?)) AS s1 ON 1} + + posts = subquery("posts" |> where(title: parent_as(:comment).subtitle) |> select([r], r.title)) + query = "comments" |> from(as: :comment) |> join(:inner, [c], p in subquery(posts)) |> select([_, p], p) |> plan() + assert all(query) == + ~s{SELECT s1."title" FROM "comments" AS c0 } <> + ~s{INNER JOIN (SELECT sp0."title" AS "title" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")) AS s1 ON 1} + end + + test "join with prefix" do + query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> select([], true) |> Map.put(:prefix, "prefix") |> plan() + assert all(query) == + ~s{SELECT 1 FROM "prefix"."schema" AS s0 INNER JOIN "prefix"."schema2" AS s1 ON s0."x" = s1."z"} + + query = Schema |> from(prefix: "first") |> join(:inner, [p], q in Schema2, on: p.x == q.z, prefix: "second") |> select([], true) |> Map.put(:prefix, "prefix") |> plan() + assert all(query) == + ~s{SELECT 1 FROM "first"."schema" AS s0 INNER JOIN "second"."schema2" AS s1 ON s0."x" = s1."z"} + end + + test "join with fragment" do + query = Schema + |> join(:inner, [p], q in fragment("SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?", p.x, ^10)) + |> select([p], {p.id, ^0}) + |> where([p], p.id > 0 and p.id < ^100) + |> plan() + assert all(query) == + ~s{SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT * FROM schema2 AS s2 WHERE s2.id = s0."x" AND s2.field = ?) AS f1 ON 1 } <> + ~s{WHERE ((s0."id" > 0) AND (s0."id" < ?))} + end + + test "join with fragment and on defined" do + query = Schema + |> join(:inner, [p], q in fragment("SELECT * FROM schema2"), on: q.id == p.id) + |> select([p], {p.id, ^0}) + |> plan() + assert all(query) == + ~s{SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT * FROM schema2) AS f1 ON f1."id" = s0."id"} + end + + test "join with query interpolation" do + inner = Ecto.Queryable.to_query(Schema2) + query = from(p in Schema, left_join: c in ^inner, select: {p.id, c.id}) |> plan() + assert all(query) == + "SELECT s0.\"id\", s1.\"id\" FROM \"schema\" AS s0 LEFT OUTER JOIN \"schema2\" AS s1 ON 1" + end + + test "lateral join with fragment" do + query = Schema + |> join(:inner_lateral, [p], q in fragment("SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?", p.x, ^10)) + |> select([p, q], {p.id, q.z}) + |> where([p], p.id > 0 and p.id < ^100) + |> plan() + assert_raise Ecto.QueryError, fn -> + all(query) + end + end + + test "cross lateral join with fragment" do + query = Schema + |> join(:cross_lateral, [p], q in fragment("SELECT * FROM schema2 AS s2 WHERE s2.id = ? 
AND s2.field = ?", p.x, ^10)) + |> select([p, q], {p.id, q.z}) + |> where([p], p.id > 0 and p.id < ^100) + |> plan() + assert_raise Ecto.QueryError, fn -> + all(query) + end + end + + test "cross join" do + query = from(p in Schema, cross_join: c in Schema2, select: {p.id, c.id}) |> plan() + assert all(query) == + "SELECT s0.\"id\", s1.\"id\" FROM \"schema\" AS s0 CROSS JOIN \"schema2\" AS s1" + end + + test "cross join with fragment" do + query = from(p in Schema, cross_join: fragment("json_each(?)", p.j), select: {p.id}) |> plan() + assert all(query) == + ~s{SELECT s0."id" FROM "schema" AS s0 CROSS JOIN json_each(s0."j") AS f1} + end + + test "join produces correct bindings" do + query = from(p in Schema, join: c in Schema2, on: true) + query = from(p in query, join: c in Schema2, on: true, select: {p.id, c.id}) + query = plan(query) + assert all(query) == + "SELECT s0.\"id\", s2.\"id\" FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON 1 INNER JOIN \"schema2\" AS s2 ON 1" + end + + describe "query interpolation parameters" do + test "self join on subquery" do + subquery = select(Schema, [r], %{x: r.x, y: r.y}) + query = subquery |> join(:inner, [c], p in subquery(subquery), on: true) |> plan() + assert all(query) == + ~s{SELECT s0."x", s0."y" FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT ss0."x" AS "x", ss0."y" AS "y" FROM "schema" AS ss0) } <> + ~s{AS s1 ON 1} + end + + test "self join on subquery with fragment" do + subquery = select(Schema, [r], %{string: fragment("downcase(?)", ^"string")}) + query = subquery |> join(:inner, [c], p in subquery(subquery), on: true) |> plan() + assert all(query) == + ~s{SELECT downcase(?) FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT downcase(?) AS "string" FROM "schema" AS ss0) } <> + ~s{AS s1 ON 1} + end + + test "join on subquery with simple select" do + subquery = select(Schema, [r], %{x: ^999, w: ^888}) + query = Schema + |> select([r], %{y: ^666}) + |> join(:inner, [c], p in subquery(subquery), on: true) + |> where([a, b], a.x == ^111) + |> plan() + + assert all(query) == + ~s{SELECT ? FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT ? AS "x", ? AS "w" FROM "schema" AS ss0) AS s1 ON 1 } <> + ~s{WHERE (s0."x" = ?)} + end + end + + ## Associations + + test "association join belongs_to" do + query = Schema2 |> join(:inner, [c], p in assoc(c, :post)) |> select([], true) |> plan() + assert all(query) == + "SELECT 1 FROM \"schema2\" AS s0 INNER JOIN \"schema\" AS s1 ON s1.\"x\" = s0.\"z\"" + end + + test "association join has_many" do + query = Schema |> join(:inner, [p], c in assoc(p, :comments)) |> select([], true) |> plan() + assert all(query) == + "SELECT 1 FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON s1.\"z\" = s0.\"x\"" + end + + test "association join has_one" do + query = Schema |> join(:inner, [p], pp in assoc(p, :permalink)) |> select([], true) |> plan() + assert all(query) == + "SELECT 1 FROM \"schema\" AS s0 INNER JOIN \"schema3\" AS s1 ON s1.\"id\" = s0.\"y\"" + end + + # Schema based + + test "insert" do + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [:id]) + assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) 
RETURNING "id"} + + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id]) + end + + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id], [1, 2]) + end + + query = insert(nil, "schema", [], [[]], {:raise, [], []}, [:id]) + assert query == ~s{INSERT INTO "schema" DEFAULT VALUES RETURNING "id"} + + query = insert(nil, "schema", [], [[]], {:raise, [], []}, []) + assert query == ~s{INSERT INTO "schema" DEFAULT VALUES} + + query = insert("prefix", "schema", [], [[]], {:raise, [], []}, []) + assert query == ~s{INSERT INTO "prefix"."schema" DEFAULT VALUES} + end + + test "insert with on conflict" do + # For :nothing + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], []}, []) + assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT DO NOTHING} + + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], [:x, :y]}, []) + assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT ("x","y") DO NOTHING} + + # For :update + update = from("schema", update: [set: [z: "foo"]]) |> plan(:update_all) + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z]) + assert query == ~s{INSERT INTO "schema" AS s0 ("x","y") VALUES (?,?) ON CONFLICT ("x","y") DO UPDATE SET "z" = 'foo' RETURNING "z"} + + # For :replace_all + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {[:x, :y], [], [:id]}, []) + assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT ("id") DO UPDATE SET "x" = EXCLUDED."x","y" = EXCLUDED."y"} + + # TODO + # query = insert(nil, "schema", [:x, :y], [[:x, :y]], {[:x, :y], [], {:unsafe_fragment, "(\"id\")"}}, []) + #assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT (\"id\") DO UPDATE SET "x" = EXCLUDED."x","y" = EXCLUDED."y"} + + #assert_raise ArgumentError, "the :conflict_target option is required on upserts by PostgreSQL", fn -> + # insert(nil, "schema", [:x, :y], [[:x, :y]], {[:x, :y], [], []}, []) + #end + end + + test "insert with query" do + query = from("schema", select: [:id]) |> plan(:all) + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:x, :y, :z], [[:x, {query, 3}, :z], [nil, {query, 2}, :z]], {:raise, [], []}, [:id]) + end + + end + + test "insert with query as rows" do + query = from(s in "schema", select: %{ foo: fragment("3"), bar: s.bar }) |> plan(:all) + + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [:foo]) + end + + query = from(s in "schema", select: %{ foo: fragment("3"), bar: s.bar }, where: true) |> plan(:all) + query = insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [:foo]) + assert query == ~s{INSERT INTO "schema" ("foo","bar") SELECT 3, s0."bar" FROM "schema" AS s0 WHERE (1) RETURNING "foo"} + end + + test "update" do + query = update(nil, "schema", [:x, :y], [id: 1], []) + assert query == ~s{UPDATE "schema" SET "x" = ?, "y" = ? WHERE "id" = ?} + + query = update(nil, "schema", [:x, :y], [id: 1], [:z]) + assert query == ~s{UPDATE "schema" SET "x" = ?, "y" = ? WHERE "id" = ? RETURNING "z"} + + query = update("prefix", "schema", [:x, :y], [id: 1], []) + assert query == ~s{UPDATE "prefix"."schema" SET "x" = ?, "y" = ? WHERE "id" = ?} + + query = update("prefix", "schema", [:x, :y], [id: 1, updated_at: nil], []) + assert query == ~s{UPDATE "prefix"."schema" SET "x" = ?, "y" = ? WHERE "id" = ? 
AND "updated_at" IS NULL} + end + + test "delete" do + query = delete(nil, "schema", [x: 1, y: 2], []) + assert query == ~s{DELETE FROM "schema" WHERE "x" = ? AND "y" = ?} + + query = delete(nil, "schema", [x: 1, y: 2], [:z]) + assert query == ~s{DELETE FROM "schema" WHERE "x" = ? AND "y" = ? RETURNING "z"} + + query = delete("prefix", "schema", [x: 1, y: 2], []) + assert query == ~s{DELETE FROM "prefix"."schema" WHERE "x" = ? AND "y" = ?} + + query = delete("prefix", "schema", [x: nil, y: 1], []) + assert query == ~s{DELETE FROM "prefix"."schema" WHERE "x" IS NULL AND "y" = ?} + end + + # DDL + + alias Ecto.Migration.Reference + import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, + constraint: 2, constraint: 3] + + test "executing a string during migration" do + assert execute_ddl("example") == ["example"] + end + + test "create table" do + create = {:create, table(:posts), + [{:add, :name, :string, [default: "Untitled", size: 20, null: false]}, + {:add, :price, :numeric, [precision: 8, scale: 2, default: {:fragment, "expr"}]}, + {:add, :on_hand, :integer, [default: 0, null: true]}, + {:add, :is_active, :boolean, [default: true]}, + {:add, :tags, {:array, :string}, [default: []]}, + {:add, :languages, {:array, :string}, [default: ["pt", "es"]]}, + {:add, :limits, {:array, :integer}, [default: [100, 30_000]]}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "posts" ("name" TEXT DEFAULT 'Untitled' NOT NULL, + "price" NUMERIC DEFAULT expr, + "on_hand" INTEGER DEFAULT 0 NULL, + "is_active" INTEGER DEFAULT true, + "tags" TEXT DEFAULT ('[]'), + "languages" TEXT DEFAULT ('["pt","es"]'), + "limits" TEXT DEFAULT ('[100,30000]')) + """ |> remove_newlines] + end + + test "create table with prefix" do + create = {:create, table(:posts, prefix: :foo), + [{:add, :category_0, %Reference{table: :categories}, []}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "foo"."posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id")) + """ |> remove_newlines] + end + + # TODO should we raise on comment? + test "create table with comment on columns" do + create = {:create, table(:posts, comment: "comment"), + [ + {:add, :category_0, %Reference{table: :categories}, [comment: "column comment"]}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, [comment: "column comment 2"]} + ]} + assert execute_ddl(create) == [remove_newlines(""" + CREATE TABLE "posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), "created_at" TEXT, "updated_at" TEXT) + """) + ] + end + + # TODO should we raise on comment? + test "create table with comment on table" do + create = {:create, table(:posts, comment: "table comment", prefix: "foo"), + [{:add, :category_0, %Reference{table: :categories}, []}]} + assert execute_ddl(create) == [remove_newlines(""" + CREATE TABLE "foo"."posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id")) + """)] + end + + # TODO should we raise on comment? 
+ test "create table with comment on columns" do + create = {:create, table(:posts, prefix: "foo"), + [ + {:add, :category_0, %Reference{table: :categories}, [comment: "column comment"]}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, [comment: "column comment 2"]} + ]} + assert execute_ddl(create) == [remove_newlines(""" + CREATE TABLE "foo"."posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id"), "created_at" TEXT, "updated_at" TEXT) + """)] + end + + test "create table with references" do + create = {:create, table(:posts), + [{:add, :id, :serial, [primary_key: true]}, + {:add, :category_0, %Reference{table: :categories}, []}, + {:add, :category_1, %Reference{table: :categories, name: :foo_bar}, []}, + {:add, :category_2, %Reference{table: :categories, on_delete: :nothing}, []}, + {:add, :category_3, %Reference{table: :categories, on_delete: :delete_all}, [null: false]}, + {:add, :category_4, %Reference{table: :categories, on_delete: :nilify_all}, []}, + {:add, :category_5, %Reference{table: :categories, on_update: :nothing}, []}, + {:add, :category_6, %Reference{table: :categories, on_update: :update_all}, [null: false]}, + {:add, :category_7, %Reference{table: :categories, on_update: :nilify_all}, []}, + {:add, :category_8, %Reference{table: :categories, on_delete: :nilify_all, on_update: :update_all}, [null: false]}, + {:add, :category_9, %Reference{table: :categories, on_delete: :restrict}, []}, + {:add, :category_10, %Reference{table: :categories, on_update: :restrict}, []}, + {:add, :category_11, %Reference{table: :categories, prefix: "foo", on_update: :restrict}, []} + ]} + + assert execute_ddl(create) == [""" + CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, + "category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), + "category_1" INTEGER CONSTRAINT "foo_bar" REFERENCES "categories"("id"), + "category_2" INTEGER CONSTRAINT "posts_category_2_fkey" REFERENCES "categories"("id"), + "category_3" INTEGER NOT NULL CONSTRAINT "posts_category_3_fkey" REFERENCES "categories"("id") ON DELETE CASCADE, + "category_4" INTEGER CONSTRAINT "posts_category_4_fkey" REFERENCES "categories"("id") ON DELETE SET NULL, + "category_5" INTEGER CONSTRAINT "posts_category_5_fkey" REFERENCES "categories"("id"), + "category_6" INTEGER NOT NULL CONSTRAINT "posts_category_6_fkey" REFERENCES "categories"("id") ON UPDATE CASCADE, + "category_7" INTEGER CONSTRAINT "posts_category_7_fkey" REFERENCES "categories"("id") ON UPDATE SET NULL, + "category_8" INTEGER NOT NULL CONSTRAINT "posts_category_8_fkey" REFERENCES "categories"("id") ON DELETE SET NULL ON UPDATE CASCADE, + "category_9" INTEGER CONSTRAINT "posts_category_9_fkey" REFERENCES "categories"("id") ON DELETE RESTRICT, + "category_10" INTEGER CONSTRAINT "posts_category_10_fkey" REFERENCES "categories"("id") ON UPDATE RESTRICT, + "category_11" INTEGER CONSTRAINT "posts_category_11_fkey" REFERENCES "foo"."categories"("id") ON UPDATE RESTRICT) + """ |> remove_newlines] + end + + test "create table with options" do + create = {:create, table(:posts, [options: "WITH FOO=BAR"]), + [{:add, :id, :serial, [primary_key: true]}, + {:add, :created_at, :naive_datetime, []}]} + assert execute_ddl(create) == + [~s|CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "created_at" TEXT) WITH FOO=BAR|] + end + + test "create table with composite key" do + create = {:create, table(:posts), + [{:add, :a, :integer, [primary_key: true]}, + {:add, :b, 
:integer, [primary_key: true]}, + {:add, :name, :string, []}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "posts" ("a" INTEGER, "b" INTEGER, "name" TEXT, PRIMARY KEY ("a","b")) + """ |> remove_newlines] + end + + # TODO not relevant to SQLite? + """ + test "create table with identity key and references" do + create = {:create, table(:posts), + [{:add, :id, :integer, [primary_key: true]}, + {:add, :category_0, %Reference{table: :categories, type: :identity}, []}, + {:add, :name, :string, []}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT BY DEFAULT AS IDENTITY, + "category_0" INTEGER, CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), + "name" TEXT, + PRIMARY KEY ("id")) """ |> remove_newlines] + end + + test "create table with identity key options" do + create = {:create, table(:posts), + [{:add, :id, :identity, [primary_key: true, start_value: 1_000, increment: 10]}, + {:add, :name, :string, []}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "posts" ("id" INTEGER GENERATED BY DEFAULT AS IDENTITY(START WITH 1000 INCREMENT BY 10) , "name" TEXT, PRIMARY KEY ("id")) """ |> remove_newlines] + end + + test "create table with binary column and null-byte default" do + create = {:create, table(:blobs), + [{:add, :blob, :binary, [default: <<0>>]}]} + + assert_raise ArgumentError, ~r/"\\x00"/, fn -> + execute_ddl(create) + end + end + + test "create table with binary column and null-byte-containing default" do + create = {:create, table(:blobs), + [{:add, :blob, :binary, [default: "foo" <> <<0>>]}]} + + assert_raise ArgumentError, ~r/"\\x666f6f00"/, fn -> + execute_ddl(create) + end + end + """ + + test "create table with binary column and UTF-8 default" do + create = {:create, table(:blobs), + [{:add, :blob, :binary, [default: "foo"]}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "blobs" ("blob" BLOB DEFAULT 'foo') + """ |> remove_newlines] + end + + test "create table with binary column and hex blob literal default" do + create = {:create, table(:blobs), + [{:add, :blob, :binary, [default: "\\x666F6F"]}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "blobs" ("blob" BLOB DEFAULT '\\\\x666F6F') + """ |> remove_newlines] + end + + test "create table with binary column and hex blob literal null-byte" do + create = {:create, table(:blobs), + [{:add, :blob, :binary, [default: "\\\x00"]}]} + + assert execute_ddl(create) == [""" + CREATE TABLE "blobs" ("blob" BLOB DEFAULT '\\\\\x00') + """ |> remove_newlines] + end + + test "create table with a map column, and an empty map default" do + create = {:create, table(:posts), + [ + {:add, :a, :map, [default: %{}]} + ] + } + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT ('{}'))|] + end + + test "create table with a map column, and a map default with values" do + create = {:create, table(:posts), + [ + {:add, :a, :map, [default: %{foo: "bar", baz: "boom"}]} + ] + } + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT ('{"baz":"boom","foo":"bar"}'))|] + end + + test "create table with a map column, and a string default" do + create = {:create, table(:posts), + [ + {:add, :a, :map, [default: ~s|{"foo":"bar","baz":"boom"}|]} + ] + } + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT '{"foo":"bar","baz":"boom"}')|] + end + + test "create table with time columns" do + create = {:create, table(:posts), + [{:add, :published_at, :time, [precision: 3]}, + {:add, :submitted_at, :time, []}]} + 
+ assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] + end + + test "create table with time_usec columns" do + create = {:create, table(:posts), + [{:add, :published_at, :time_usec, [precision: 3]}, + {:add, :submitted_at, :time_usec, []}]} + + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] + end + + test "create table with utc_datetime columns" do + create = {:create, table(:posts), + [{:add, :published_at, :utc_datetime, [precision: 3]}, + {:add, :submitted_at, :utc_datetime, []}]} + + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] + end + + test "create table with utc_datetime_usec columns" do + create = {:create, table(:posts), + [{:add, :published_at, :utc_datetime_usec, [precision: 3]}, + {:add, :submitted_at, :utc_datetime_usec, []}]} + + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] + end + + test "create table with naive_datetime columns" do + create = {:create, table(:posts), + [{:add, :published_at, :naive_datetime, [precision: 3]}, + {:add, :submitted_at, :naive_datetime, []}]} + + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] + end + + test "create table with naive_datetime_usec columns" do + create = {:create, table(:posts), + [{:add, :published_at, :naive_datetime_usec, [precision: 3]}, + {:add, :submitted_at, :naive_datetime_usec, []}]} + + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] + end + + test "create table with an unsupported type" do + create = {:create, table(:posts), + [ + {:add, :a, {:a, :b, :c}, [default: %{}]} + ] + } + assert_raise ArgumentError, + "unsupported type `{:a, :b, :c}`. 
" <> + "The type can either be an atom, a string or a tuple of the form " <> + "`{:map, t}` or `{:array, t}` where `t` itself follows the same conditions.", + fn -> execute_ddl(create) end + end + + test "drop table" do + drop = {:drop, table(:posts), :restrict} + assert execute_ddl(drop) == [~s|DROP TABLE "posts"|] + end + + test "drop table with prefix" do + drop = {:drop, table(:posts, prefix: :foo), :restrict} + assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] + end + + test "drop table with cascade" do + drop = {:drop, table(:posts), :cascade} + + log = capture_io(:stderr, fn -> + assert execute_ddl(drop) == [~s|DROP TABLE "posts"|] + end) + + assert log =~ ":cascade" + + log = capture_io(:stderr, fn -> + drop = {:drop, table(:posts, prefix: :foo), :cascade} + assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] + end) + + assert log =~ ":cascade" + end + + test "alter table" do + alter = {:alter, table(:posts), + [{:add, :title, :string, [default: "Untitled", size: 100, null: false]}, + {:add, :author_id, %Reference{table: :author}, []}, + {:add, :category_id, %Reference{table: :categories, validate: false}, []}, + {:remove, :summary}, + {:remove, :body, :text, []}, + {:remove, :space_id, %Reference{table: :author}, []}]} + + assert execute_ddl(alter) == [ + ~s|ALTER TABLE "posts" ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL|, + ~s|ALTER TABLE "posts" ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES "author"("id")|, + ~s|ALTER TABLE "posts" ADD COLUMN "category_id" INTEGER CONSTRAINT "posts_category_id_fkey" REFERENCES "categories"("id")|, + ~s|ALTER TABLE "posts" DROP COLUMN "summary"|, + ~s|ALTER TABLE "posts" DROP COLUMN "body"|, + ~s|ALTER TABLE "posts" DROP COLUMN "space_id"| + ] + end + + test "alter table with comments on table and columns, ignore comments" do + alter = {:alter, table(:posts, comment: "table comment"), + [{:add, :title, :string, [default: "Untitled", size: 100, null: false, comment: "column comment"]}, + {:remove, :summary} + ]} + + assert execute_ddl(alter) == [remove_newlines(""" + ALTER TABLE "posts" + ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL + """), remove_newlines(""" + ALTER TABLE "posts" + DROP COLUMN "summary" + """)] + end + + test "alter table with prefix" do + alter = {:alter, table(:posts, prefix: :foo), + [{:add, :author_id, %Reference{table: :author}, []}]} + + assert execute_ddl(alter) == [""" + ALTER TABLE "foo"."posts" + ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES "foo"."author"("id") + """ |> remove_newlines] + end + + test "alter table with serial primary key" do + alter = {:alter, table(:posts), + [{:add, :my_pk, :serial, [primary_key: true]}]} + + assert execute_ddl(alter) == [""" + ALTER TABLE "posts" + ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT + """ |> remove_newlines] + end + + test "alter table with bigserial primary key" do + alter = {:alter, table(:posts), + [{:add, :my_pk, :bigserial, [primary_key: true]}]} + + assert execute_ddl(alter) == [""" + ALTER TABLE "posts" + ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT + """ |> remove_newlines] + end + + test "create index" do + create = {:create, index(:posts, [:category_id, :permalink])} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts_category_id_permalink_index" ON "posts" ("category_id", "permalink")|] + + create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main")} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts$main" ON "posts" 
(lower(permalink))|] + end + + test "create index with prefix" do + create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink")|] + + create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main", prefix: :foo)} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts$main" ON "foo"."posts" (lower(permalink))|] + end + + test "create index with comment" do + create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")} + assert execute_ddl(create) == [remove_newlines(""" + CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink") + """)] + end + + test "create unique index" do + create = {:create, index(:posts, [:permalink], unique: true)} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink")|] + end + + test "create unique index with condition" do + create = {:create, index(:posts, [:permalink], unique: true, where: "public IS 1")} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public IS 1|] + + create = {:create, index(:posts, [:permalink], unique: true, where: :public)} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public|] + end + + # TODO Not Supported? + """ + test "create index with include fields" do + create = {:create, index(:posts, [:permalink], unique: true, include: [:public])} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") INCLUDE ("public")|] + + create = {:create, index(:posts, [:permalink], unique: true, include: [:public], where: "public IS 1")} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") INCLUDE ("public") WHERE public IS 1|] + end + + test "create unique index with nulls_distinct option" do + create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: true)} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") NULLS DISTINCT|] + + create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: false)} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") NULLS NOT DISTINCT|] + + create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: false, include: [:public], where: "public IS 1")} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") INCLUDE ("public") NULLS NOT DISTINCT WHERE public IS 1|] + end + + test "create index concurrently not supported" do + index = index(:posts, [:permalink]) + create = {:create, %{index | concurrently: true}} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts_permalink_index" ON "posts" ("permalink")|] + end + + test "create unique index concurrently not supported" do + index = index(:posts, [:permalink], unique: true) + create = {:create, %{index | concurrently: true}} + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink")|] + end + """ + + test "create an index using a different type" do + create = {:create, index(:posts, [:permalink], using: :hash)} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts_permalink_index" ON "posts" 
("permalink")|] + end + + # TODO doesn't support + """ + test "create an index without recursively creating indexes on partitions" do + create = {:create, index(:posts, [:permalink], only: true)} + assert execute_ddl(create) == + [~s|CREATE INDEX "posts_permalink_index" ON ONLY "posts" ("permalink")|] + end + """ + + test "drop index" do + drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} + assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] + end + + test "drop index with prefix" do + drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :restrict} + assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] + end + + test "drop index concurrently not supported" do + index = index(:posts, [:id], name: "posts$main") + drop = {:drop, %{index | concurrently: true}, :restrict} + assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] + end + + test "drop index with cascade" do + assert capture_io(:stderr, fn -> + drop = {:drop, index(:posts, [:id], name: "posts$main"), :cascade} + assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] + end) =~ ":cascade" + + assert capture_io(:stderr, fn -> + drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :cascade} + assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] + end) =~ ":cascade" + end + + # TODO SQLITE doesn't support alter table but you could add/remove? + """ + test "create check constraint" do + create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0")} + assert execute_ddl(create) == + [~s|ALTER TABLE "products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0)|] + + create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo")} + assert execute_ddl(create) == + [~s|ALTER TABLE "foo"."products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0)|] + end + + test "create exclusion constraint" do + create = {:create, constraint(:products, "price_must_be_positive", exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|)} + assert execute_ddl(create) == + [~s|ALTER TABLE "products" ADD CONSTRAINT "price_must_be_positive" EXCLUDE USING gist (int4range("from", "to", '[]') WITH &&)|] + end + + test "create constraint with comment" do + create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo", comment: "comment")} + assert execute_ddl(create) == [remove_newlines("ALTER TABLE "foo"."products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0)"), + ~s|COMMENT ON CONSTRAINT "price_must_be_positive" ON "foo"."products" IS 'comment'|] + end + + test "create invalid constraint" do + create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo", validate: false)} + assert execute_ddl(create) == [~s|ALTER TABLE "foo"."products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0) NOT VALID|] + end + + test "drop constraint" do + drop = {:drop, constraint(:products, "price_must_be_positive"), :restrict} + assert execute_ddl(drop) == + [~s|ALTER TABLE "products" DROP CONSTRAINT "price_must_be_positive"|] + + drop = {:drop, constraint(:products, "price_must_be_positive", prefix: "foo"), :restrict} + assert execute_ddl(drop) == + [~s|ALTER TABLE "foo"."products" DROP CONSTRAINT "price_must_be_positive"|] + end + + test "drop_if_exists constraint" do + drop = {:drop_if_exists, constraint(:products, "price_must_be_positive"), :restrict} + assert execute_ddl(drop) == + [~s|ALTER TABLE "products" DROP 
CONSTRAINT IF EXISTS "price_must_be_positive"|] + + drop = {:drop_if_exists, constraint(:products, "price_must_be_positive", prefix: "foo"), :restrict} + assert execute_ddl(drop) == + [~s|ALTER TABLE "foo"."products" DROP CONSTRAINT IF EXISTS "price_must_be_positive"|] + end + """ + + test "rename table" do + rename = {:rename, table(:posts), table(:new_posts)} + assert execute_ddl(rename) == [~s|ALTER TABLE "posts" RENAME TO "new_posts"|] + end + + test "rename table with prefix" do + rename = {:rename, table(:posts, prefix: :foo), table(:new_posts, prefix: :foo)} + assert execute_ddl(rename) == [~s|ALTER TABLE "foo"."posts" RENAME TO "new_posts"|] + end + + test "rename column" do + rename = {:rename, table(:posts), :given_name, :first_name} + assert execute_ddl(rename) == [~s|ALTER TABLE "posts" RENAME COLUMN "given_name" TO "first_name"|] + end + + test "rename column in prefixed table" do + rename = {:rename, table(:posts, prefix: :foo), :given_name, :first_name} + assert execute_ddl(rename) == [~s|ALTER TABLE "foo"."posts" RENAME COLUMN "given_name" TO "first_name"|] + end + + defp remove_newlines(string) do + string |> String.trim |> String.replace("\n", " ") + end + +end From c8d08ed800339b4700e46babd120c699dfe10934 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Sat, 21 Jan 2023 12:24:23 -0600 Subject: [PATCH 02/17] Removing warns --- lib/ecto/adapters/sqlite3/connection.ex | 16 +++--- .../adapters/sqlite3/pg_connection_test.exs | 56 ++++--------------- 2 files changed, 20 insertions(+), 52 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index 7366b12..573d483 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -428,10 +428,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do @impl true def execute_ddl({:drop, %Table{} = table, mode}) do if mode != [] do - IO.warn """ - `#{inspect(mode)}` is not supported for DROP TABLE with SQLite3 \ - DROP TABLE #{table.name} cannot have options set. - """, [] + raise ArgumentError, """ + `#{inspect(mode)}` is not supported for DROP TABLE with SQLite3 \ + DROP TABLE #{table.name} cannot have options set. + """ end execute_ddl({:drop, table}) end @@ -517,10 +517,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do def execute_ddl({:drop, %Index{} = index, mode}) do if mode != [] do - IO.warn """ - `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ - DROP INDEX #{index.name} cannot have options set. - """, [] + raise ArgumentError, """ + `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ + DROP INDEX #{index.name} cannot have options set. + """ end execute_ddl({:drop, index}) end diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs index 7b1e7fe..1e163e3 100644 --- a/test/ecto/adapters/sqlite3/pg_connection_test.exs +++ b/test/ecto/adapters/sqlite3/pg_connection_test.exs @@ -2,8 +2,6 @@ defmodule Ecto.Adapters.SQLite3Test do use ExUnit.Case, async: false import Ecto.Query - import ExUnit.CaptureIO - alias Ecto.Queryable alias Ecto.Adapters.SQLite3.Connection, as: SQL @@ -1407,30 +1405,6 @@ defmodule Ecto.Adapters.SQLite3Test do ] end - # TODO should we raise on comment? 
- test "create table with comment on table" do - create = {:create, table(:posts, comment: "table comment", prefix: "foo"), - [{:add, :category_0, %Reference{table: :categories}, []}]} - assert execute_ddl(create) == [remove_newlines(""" - CREATE TABLE "foo"."posts" - ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id")) - """)] - end - - # TODO should we raise on comment? - test "create table with comment on columns" do - create = {:create, table(:posts, prefix: "foo"), - [ - {:add, :category_0, %Reference{table: :categories}, [comment: "column comment"]}, - {:add, :created_at, :timestamp, []}, - {:add, :updated_at, :timestamp, [comment: "column comment 2"]} - ]} - assert execute_ddl(create) == [remove_newlines(""" - CREATE TABLE "foo"."posts" - ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id"), "created_at" TEXT, "updated_at" TEXT) - """)] - end - test "create table with references" do create = {:create, table(:posts), [{:add, :id, :serial, [primary_key: true]}, @@ -1655,18 +1629,13 @@ defmodule Ecto.Adapters.SQLite3Test do test "drop table with cascade" do drop = {:drop, table(:posts), :cascade} - log = capture_io(:stderr, fn -> - assert execute_ddl(drop) == [~s|DROP TABLE "posts"|] - end) - - assert log =~ ":cascade" - - log = capture_io(:stderr, fn -> - drop = {:drop, table(:posts, prefix: :foo), :cascade} - assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] - end) + assert_raise ArgumentError, fn-> + execute_ddl(drop) + end - assert log =~ ":cascade" + assert_raise ArgumentError, fn-> + execute_ddl(drop) + end end test "alter table" do @@ -1849,15 +1818,15 @@ defmodule Ecto.Adapters.SQLite3Test do end test "drop index with cascade" do - assert capture_io(:stderr, fn -> + assert_raise ArgumentError, fn -> drop = {:drop, index(:posts, [:id], name: "posts$main"), :cascade} - assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] - end) =~ ":cascade" + execute_ddl(drop) + end - assert capture_io(:stderr, fn -> + assert_raise ArgumentError, fn -> drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :cascade} - assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] - end) =~ ":cascade" + execute_ddl(drop) + end end # TODO SQLITE doesn't support alter table but you could add/remove? 
@@ -1933,5 +1902,4 @@ defmodule Ecto.Adapters.SQLite3Test do defp remove_newlines(string) do string |> String.trim |> String.replace("\n", " ") end - end From 75e398b22088b6ae62f103a4a1503b83dd1c92b9 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Tue, 24 Jan 2023 10:22:08 -0600 Subject: [PATCH 03/17] in tests working --- lib/ecto/adapters/sqlite3/connection.ex | 7 +- test/ecto/adapters/sqlite3/data_type_test.exs | 16 ++--- .../adapters/sqlite3/pg_connection_test.exs | 65 ++++++++++++------- 3 files changed, 57 insertions(+), 31 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index 573d483..b0f12f7 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -1242,6 +1242,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do "0" end + def expr({:in, _, [_left, "[]"]}, _sources, _query) do + "0" + end + def expr({:in, _, [left, right]}, sources, query) when is_list(right) do args = intersperse_map(right, ?,, &expr(&1, sources, query)) [expr(left, sources, query), " IN (", args, ?)] @@ -1260,7 +1264,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do [expr(left, sources, query), " IN ", expr(subquery, sources, query)] end - def expr({:in, _, [left, right]}, sources, query) do + def expr({:in, a, [left, right]} = expr, sources, query) do [ expr(left, sources, query), " IN (SELECT value FROM JSON_EACH(", @@ -1416,6 +1420,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr(true, _sources, _query), do: "1" def expr(false, _sources, _query), do: "0" + def expr(literal, _sources, _query) when is_binary(literal) do [?', escape_string(literal), ?'] end diff --git a/test/ecto/adapters/sqlite3/data_type_test.exs b/test/ecto/adapters/sqlite3/data_type_test.exs index f62577c..30d7044 100644 --- a/test/ecto/adapters/sqlite3/data_type_test.exs +++ b/test/ecto/adapters/sqlite3/data_type_test.exs @@ -46,20 +46,20 @@ defmodule Ecto.Adapters.SQLite3.DataTypeTest do assert DataType.column_type(:uuid, nil) == "BLOB" end - test ":map is JSON" do - assert DataType.column_type(:map, nil) == "JSON" + test ":map is TEXT" do + assert DataType.column_type(:map, nil) == "TEXT" end - test "{:map, _} is JSON" do - assert DataType.column_type({:map, %{}}, nil) == "JSON" + test "{:map, _} is TEXT" do + assert DataType.column_type({:map, %{}}, nil) == "TEXT" end - test ":array is JSON" do - assert DataType.column_type(:array, nil) == "JSON" + test ":array is TEXT" do + assert DataType.column_type(:array, nil) == "TEXT" end - test "{:array, _} is JSON" do - assert DataType.column_type({:array, []}, nil) == "JSON" + test "{:array, _} is TEXT" do + assert DataType.column_type({:array, []}, nil) == "TEXT" end test ":float is NUMERIC" do diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs index 1e163e3..d529adc 100644 --- a/test/ecto/adapters/sqlite3/pg_connection_test.exs +++ b/test/ecto/adapters/sqlite3/pg_connection_test.exs @@ -4,7 +4,7 @@ defmodule Ecto.Adapters.SQLite3Test do import Ecto.Query alias Ecto.Queryable - alias Ecto.Adapters.SQLite3.Connection, as: SQL + alias Ecto.Adapters.SQLite3.Connection, as: SQL defmodule Schema do use Ecto.Schema @@ -212,7 +212,7 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} end - # TODO should we warn about locks? or yell? + # TODO should we warn about locks? or yell? 
test "CTE update_all" do cte_query = from(x in Schema, order_by: [asc: :id], limit: 10, lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id}) @@ -669,34 +669,55 @@ defmodule Ecto.Adapters.SQLite3Test do test "in expression" do query = Schema |> select([e], 1 in []) |> plan() - assert all(query) == ~s{SELECT false FROM "schema" AS s0} + assert all(query) == ~s{SELECT 0 FROM "schema" AS s0} - query = Schema |> select([e], 1 in [1,e.x,3]) |> plan() - assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} + query = + Schema + |> select([e], 1 in ^[]) + |> plan() - query = Schema |> select([e], 1 in ^[]) |> plan() - assert all(query) == ~s{SELECT 1 = ANY(?) FROM "schema" AS s0} + assert all(query) == ~s{SELECT 0 FROM "schema" AS s0} - query = Schema |> select([e], 1 in ^[1, 2, 3]) |> plan() - assert all(query) == ~s{SELECT 1 = ANY(?) FROM "schema" AS s0} + query = + Schema + |> select([e], 1 in ^[1, 2, 3]) + |> plan() + + assert all(query) == ~s{SELECT 1 IN (?,?,?) FROM "schema" AS s0} + + query = + Schema + |> select([e], 1 in [1, ^2, 3]) + |> plan() - query = Schema |> select([e], 1 in [1, ^2, 3]) |> plan() assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM "schema" AS s0} - query = Schema |> select([e], ^1 in [1, ^2, 3]) |> plan() - assert all(query) == ~s{SELECT ? IN (1,?,3) FROM "schema" AS s0} + query = + Schema + |> select([e], e.x == ^0 or e.x in ^[1, 2, 3] or e.x == ^4) + |> plan() + + assert all(query) == + ~s{SELECT ((s0."x" = ?) OR s0."x" IN (?,?,?)) OR (s0."x" = ?) FROM "schema" AS s0} + + query = + Schema + |> select([e], e in [1, 2, 3]) + |> plan() + + assert all(query) == + ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} - query = Schema |> select([e], ^1 in ^[1, 2, 3]) |> plan() - assert all(query) == ~s{SELECT ? = ANY(?) FROM "schema" AS s0} - query = Schema |> select([e], 1 in e.w) |> plan() - assert all(query) == ~s{SELECT 1 = ANY(s0."w") FROM "schema" AS s0} + query = Schema |> select([e], 1 in [1,e.x,3]) |> plan() + assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} - query = Schema |> select([e], 1 in fragment("foo")) |> plan() - assert all(query) == ~s{SELECT 1 = ANY(foo) FROM "schema" AS s0} + query = Schema |> select([e], 1 in [1, ^2, 3]) |> plan() + assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM "schema" AS s0} + + query = Schema |> select([e], ^1 in [1, ^2, 3]) |> plan() + assert all(query) == ~s{SELECT ? IN (1,?,3) FROM "schema" AS s0} - query = Schema |> select([e], e.x == ^0 or e.x in ^[1, 2, 3] or e.x == ^4) |> plan() - assert all(query) == ~s{SELECT ((s0."x" = ?) OR s0."x" = ANY(?)) OR (s0."x" = ?) 
FROM "schema" AS s0} end test "in subquery" do @@ -1820,12 +1841,12 @@ defmodule Ecto.Adapters.SQLite3Test do test "drop index with cascade" do assert_raise ArgumentError, fn -> drop = {:drop, index(:posts, [:id], name: "posts$main"), :cascade} - execute_ddl(drop) + execute_ddl(drop) end assert_raise ArgumentError, fn -> drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :cascade} - execute_ddl(drop) + execute_ddl(drop) end end From 5192a5746b21de670a0906a2b43767586f05643d Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Tue, 24 Jan 2023 13:03:18 -0600 Subject: [PATCH 04/17] Bunch of WIP --- lib/ecto/adapters/sqlite3.ex | 17 +- lib/ecto/adapters/sqlite3/connection.ex | 64 ++++-- .../adapters/sqlite3/pg_connection_test.exs | 202 +++++------------- 3 files changed, 108 insertions(+), 175 deletions(-) diff --git a/lib/ecto/adapters/sqlite3.ex b/lib/ecto/adapters/sqlite3.ex index 5e80252..9da7400 100644 --- a/lib/ecto/adapters/sqlite3.ex +++ b/lib/ecto/adapters/sqlite3.ex @@ -439,25 +439,18 @@ defmodule Ecto.Adapters.SQLite3 do [type, &Codec.naive_datetime_encode(&1, dt_type)] end + @impl Ecto.Adapter - def dumpers({:array, _}, type) do - [type, &Codec.json_encode/1] - end + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] @impl Ecto.Adapter - def dumpers({:map, _}, type) do - [&Ecto.Type.embedded_dump(type, &1, :json), &Codec.json_encode/1] - end + def dumpers({:array, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] @impl Ecto.Adapter - def dumpers(:map, type) do - [type, &Codec.json_encode/1] - end + def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}] @impl Ecto.Adapter - def dumpers(_primitive, type) do - [type] - end + def dumpers(_, type), do: [type] ## ## HELPERS diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index b0f12f7..0499a9c 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -463,6 +463,32 @@ defmodule Ecto.Adapters.SQLite3.Connection do end) end + @impl true + def execute_ddl({_, %Index{concurrently: true}}) do + raise ArgumentError, "`concurrently` is not supported with SQLite3" + end + + @impl true + def execute_ddl({_, %Index{only: true}}) do + raise ArgumentError, "`only` is not supported with SQLite3" + end + + @impl true + def execute_ddl({_, %Index{include: x}}) when length(x) != 0 do + raise ArgumentError, "`include` is not supported with SQLite3" + end + + @impl true + def execute_ddl({_, %Index{using: x}}) when not is_nil(x) do + raise ArgumentError, "`using` is not supported with SQLite3" + end + + @impl true + def execute_ddl({_, %Index{nulls_distinct: x}}) when not is_nil(x) do + raise ArgumentError, "`nulls_distinct` is not supported with SQLite3" + end + + @impl true def execute_ddl({:create, %Index{} = index}) do fields = intersperse_map(index.columns, ", ", &index_expr/1) @@ -515,7 +541,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do @impl true def execute_ddl({:drop, %Index{} = index, mode}) do - if mode != [] do raise ArgumentError, """ `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ @@ -525,6 +550,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do execute_ddl({:drop, index}) end + @impl true + def execute_ddl({:drop_if_exists, %Index{concurrently: true}}) do + raise ArgumentError, "`concurrently` is not supported with SQLite3" + end + @impl true def execute_ddl({:drop_if_exists, %Index{} = index}) do [ @@ -536,7 +566,13 @@ defmodule Ecto.Adapters.SQLite3.Connection do end 
@impl true - def execute_ddl({:drop_if_exists, %Index{} = index, _mode}) do + def execute_ddl({:drop_if_exists, %Index{} = index, mode}) do + if mode != [] do + raise ArgumentError, """ + `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ + DROP INDEX #{index.name} cannot have options set. + """ + end execute_ddl({:drop_if_exists, index}) end @@ -991,6 +1027,12 @@ defmodule Ecto.Adapters.SQLite3.Connection do source: source, hints: hints } -> + if hints != [] do + raise Ecto.QueryError, + query: query, + message: "join hints are not supported by SQLite3" + end + {join, name} = get_source(query, sources, ix, source) [ @@ -1242,10 +1284,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do "0" end - def expr({:in, _, [_left, "[]"]}, _sources, _query) do - "0" - end - def expr({:in, _, [left, right]}, sources, query) when is_list(right) do args = intersperse_map(right, ?,, &expr(&1, sources, query)) [expr(left, sources, query), " IN (", args, ?)] @@ -1264,16 +1302,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do [expr(left, sources, query), " IN ", expr(subquery, sources, query)] end - def expr({:in, a, [left, right]} = expr, sources, query) do - [ - expr(left, sources, query), - " IN (SELECT value FROM JSON_EACH(", - expr(right, sources, query), - ?), - ?) - ] - end - def expr({:is_nil, _, [arg]}, sources, query) do [expr(arg, sources, query) | " IS NULL"] end @@ -1416,6 +1444,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do ["CAST(", expr(other, sources, query), " AS ", column_type(type, query), ?)] end + def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do + ["CAST(", expr(other, sources, query), " AS ", column_type(type, query), ?)] + end + def expr(nil, _sources, _query), do: "NULL" def expr(true, _sources, _query), do: "1" def expr(false, _sources, _query), do: "0" diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs index d529adc..06e4553 100644 --- a/test/ecto/adapters/sqlite3/pg_connection_test.exs +++ b/test/ecto/adapters/sqlite3/pg_connection_test.exs @@ -705,8 +705,7 @@ defmodule Ecto.Adapters.SQLite3Test do |> select([e], e in [1, 2, 3]) |> plan() - assert all(query) == - ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} + assert all(query) == ~s{SELECT s0 IN (1,2,3) FROM "schema" AS s0} query = Schema |> select([e], 1 in [1,e.x,3]) |> plan() @@ -939,8 +938,6 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} end - # TODO should raise? 
- """ test "update all with left join" do query = from(m in Schema, join: x in assoc(m, :comments), left_join: p in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) assert update_all(query) == @@ -951,7 +948,6 @@ defmodule Ecto.Adapters.SQLite3Test do query = from(m in Schema, left_join: p in assoc(m, :permalink), left_join: x in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) assert update_all(query) == ~s{select} end - """ test "delete all" do query = Schema |> Queryable.to_query |> plan() @@ -1092,10 +1088,8 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{INNER JOIN "schema" AS s2 ON 1} end - #TODO ahh probably should raise - """ test "join with hints" do - assert_raise Ecto.QueryError, ~r/table hints are not supported by PostgreSQL/, fn -> + assert_raise Ecto.QueryError, ~r/join hints are not supported by SQLite3/, fn -> Schema |> join(:inner, [p], q in Schema2, hints: ["USE INDEX FOO", "USE INDEX BAR"]) |> select([], true) @@ -1103,7 +1097,6 @@ defmodule Ecto.Adapters.SQLite3Test do |> all() end end - """ test "join with nothing bound" do query = Schema |> join(:inner, [], q in Schema2, on: q.z == q.z) |> select([], true) |> plan() @@ -1309,17 +1302,24 @@ defmodule Ecto.Adapters.SQLite3Test do query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z]) assert query == ~s{INSERT INTO "schema" AS s0 ("x","y") VALUES (?,?) ON CONFLICT ("x","y") DO UPDATE SET "z" = 'foo' RETURNING "z"} - # For :replace_all - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {[:x, :y], [], [:id]}, []) - assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT ("id") DO UPDATE SET "x" = EXCLUDED."x","y" = EXCLUDED."y"} + assert_raise ArgumentError, "Upsert in SQLite3 requires :conflict_target", fn -> + conflict_target = [] + insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], conflict_target}, []) + end + + assert_raise ArgumentError, "Upsert in SQLite3 does not support ON CONSTRAINT", fn -> + insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], {:constraint, :foo}}, []) + end - # TODO - # query = insert(nil, "schema", [:x, :y], [[:x, :y]], {[:x, :y], [], {:unsafe_fragment, "(\"id\")"}}, []) - #assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT (\"id\") DO UPDATE SET "x" = EXCLUDED."x","y" = EXCLUDED."y"} + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], [:id]}, []) - #assert_raise ArgumentError, "the :conflict_target option is required on upserts by PostgreSQL", fn -> - # insert(nil, "schema", [:x, :y], [[:x, :y]], {[:x, :y], [], []}, []) - #end + assert query == + """ + INSERT INTO "schema" ("x","y") \ + VALUES (?,?) \ + ON CONFLICT ("id") \ + DO UPDATE SET "x" = EXCLUDED."x","y" = EXCLUDED."y"\ + """ end test "insert with query" do @@ -1373,8 +1373,7 @@ defmodule Ecto.Adapters.SQLite3Test do # DDL alias Ecto.Migration.Reference - import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, - constraint: 2, constraint: 3] + import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, constraint: 3] test "executing a string during migration" do assert execute_ddl("example") == ["example"] @@ -1479,49 +1478,6 @@ defmodule Ecto.Adapters.SQLite3Test do """ |> remove_newlines] end - # TODO not relevant to SQLite? 
- """ - test "create table with identity key and references" do - create = {:create, table(:posts), - [{:add, :id, :integer, [primary_key: true]}, - {:add, :category_0, %Reference{table: :categories, type: :identity}, []}, - {:add, :name, :string, []}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT BY DEFAULT AS IDENTITY, - "category_0" INTEGER, CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), - "name" TEXT, - PRIMARY KEY ("id")) """ |> remove_newlines] - end - - test "create table with identity key options" do - create = {:create, table(:posts), - [{:add, :id, :identity, [primary_key: true, start_value: 1_000, increment: 10]}, - {:add, :name, :string, []}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "posts" ("id" INTEGER GENERATED BY DEFAULT AS IDENTITY(START WITH 1000 INCREMENT BY 10) , "name" TEXT, PRIMARY KEY ("id")) """ |> remove_newlines] - end - - test "create table with binary column and null-byte default" do - create = {:create, table(:blobs), - [{:add, :blob, :binary, [default: <<0>>]}]} - - assert_raise ArgumentError, ~r/"\\x00"/, fn -> - execute_ddl(create) - end - end - - test "create table with binary column and null-byte-containing default" do - create = {:create, table(:blobs), - [{:add, :blob, :binary, [default: "foo" <> <<0>>]}]} - - assert_raise ArgumentError, ~r/"\\x666f6f00"/, fn -> - execute_ddl(create) - end - end - """ - test "create table with binary column and UTF-8 default" do create = {:create, table(:blobs), [{:add, :blob, :binary, [default: "foo"]}]} @@ -1638,12 +1594,12 @@ defmodule Ecto.Adapters.SQLite3Test do end test "drop table" do - drop = {:drop, table(:posts), :restrict} + drop = {:drop, table(:posts)} assert execute_ddl(drop) == [~s|DROP TABLE "posts"|] end test "drop table with prefix" do - drop = {:drop, table(:posts, prefix: :foo), :restrict} + drop = {:drop, table(:posts, prefix: :foo)} assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] end @@ -1767,75 +1723,65 @@ defmodule Ecto.Adapters.SQLite3Test do end # TODO Not Supported? 
- """ test "create index with include fields" do create = {:create, index(:posts, [:permalink], unique: true, include: [:public])} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") INCLUDE ("public")|] - - create = {:create, index(:posts, [:permalink], unique: true, include: [:public], where: "public IS 1")} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") INCLUDE ("public") WHERE public IS 1|] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end test "create unique index with nulls_distinct option" do create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: true)} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") NULLS DISTINCT|] - - create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: false)} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") NULLS NOT DISTINCT|] - - create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: false, include: [:public], where: "public IS 1")} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") INCLUDE ("public") NULLS NOT DISTINCT WHERE public IS 1|] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end test "create index concurrently not supported" do index = index(:posts, [:permalink]) create = {:create, %{index | concurrently: true}} - assert execute_ddl(create) == - [~s|CREATE INDEX "posts_permalink_index" ON "posts" ("permalink")|] - end - - test "create unique index concurrently not supported" do - index = index(:posts, [:permalink], unique: true) - create = {:create, %{index | concurrently: true}} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink")|] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end - """ test "create an index using a different type" do create = {:create, index(:posts, [:permalink], using: :hash)} - assert execute_ddl(create) == - [~s|CREATE INDEX "posts_permalink_index" ON "posts" ("permalink")|] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end - # TODO doesn't support - """ test "create an index without recursively creating indexes on partitions" do create = {:create, index(:posts, [:permalink], only: true)} - assert execute_ddl(create) == - [~s|CREATE INDEX "posts_permalink_index" ON ONLY "posts" ("permalink")|] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end - """ test "drop index" do - drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} + drop = {:drop, index(:posts, [:id], name: "posts$main")} assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] end test "drop index with prefix" do - drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :restrict} + drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo)} assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] end + test "drop index mode not supported" do + assert_raise ArgumentError, fn -> + drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} + execute_ddl(drop) + end + end + test "drop index concurrently not supported" do index = index(:posts, [:id], name: "posts$main") - drop = {:drop, %{index | concurrently: true}, :restrict} - assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] + assert_raise ArgumentError, fn -> + 
drop = {:drop, %{index | concurrently: true}} + execute_ddl(drop) + end end test "drop index with cascade" do @@ -1850,55 +1796,17 @@ defmodule Ecto.Adapters.SQLite3Test do end end - # TODO SQLITE doesn't support alter table but you could add/remove? - """ - test "create check constraint" do - create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0")} - assert execute_ddl(create) == - [~s|ALTER TABLE "products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0)|] - - create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo")} - assert execute_ddl(create) == - [~s|ALTER TABLE "foo"."products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0)|] - end - - test "create exclusion constraint" do - create = {:create, constraint(:products, "price_must_be_positive", exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|)} - assert execute_ddl(create) == - [~s|ALTER TABLE "products" ADD CONSTRAINT "price_must_be_positive" EXCLUDE USING gist (int4range("from", "to", '[]') WITH &&)|] - end - - test "create constraint with comment" do - create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo", comment: "comment")} - assert execute_ddl(create) == [remove_newlines("ALTER TABLE "foo"."products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0)"), - ~s|COMMENT ON CONSTRAINT "price_must_be_positive" ON "foo"."products" IS 'comment'|] - end - - test "create invalid constraint" do - create = {:create, constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo", validate: false)} - assert execute_ddl(create) == [~s|ALTER TABLE "foo"."products" ADD CONSTRAINT "price_must_be_positive" CHECK (price > 0) NOT VALID|] - end - test "drop constraint" do - drop = {:drop, constraint(:products, "price_must_be_positive"), :restrict} - assert execute_ddl(drop) == - [~s|ALTER TABLE "products" DROP CONSTRAINT "price_must_be_positive"|] - - drop = {:drop, constraint(:products, "price_must_be_positive", prefix: "foo"), :restrict} - assert execute_ddl(drop) == - [~s|ALTER TABLE "foo"."products" DROP CONSTRAINT "price_must_be_positive"|] + assert_raise ArgumentError, ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, fn -> + execute_ddl({:drop, constraint(:products, "price_must_be_positive", prefix: :foo), :restrict}) + end end test "drop_if_exists constraint" do - drop = {:drop_if_exists, constraint(:products, "price_must_be_positive"), :restrict} - assert execute_ddl(drop) == - [~s|ALTER TABLE "products" DROP CONSTRAINT IF EXISTS "price_must_be_positive"|] - - drop = {:drop_if_exists, constraint(:products, "price_must_be_positive", prefix: "foo"), :restrict} - assert execute_ddl(drop) == - [~s|ALTER TABLE "foo"."products" DROP CONSTRAINT IF EXISTS "price_must_be_positive"|] + assert_raise ArgumentError, ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, fn -> + execute_ddl({:drop_if_exists, constraint(:products, "price_must_be_positive", prefix: :foo), :restrict}) + end end - """ test "rename table" do rename = {:rename, table(:posts), table(:new_posts)} From b30db8e0c67b3ec823b27f3dd966e6af051c7347 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Tue, 24 Jan 2023 13:15:31 -0600 Subject: [PATCH 05/17] Distincts --- lib/ecto/adapters/sqlite3/connection.ex | 30 +++++------- .../adapters/sqlite3/pg_connection_test.exs | 49 ++++--------------- 2 files changed, 20 insertions(+), 59 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex 
b/lib/ecto/adapters/sqlite3/connection.ex index 0499a9c..cdb3479 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -866,9 +866,12 @@ defmodule Ecto.Adapters.SQLite3.Connection do def handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} - def distinct(nil, _sources, _query), do: [] - def distinct(%QueryExpr{expr: false}, _sources, _query), do: [] - def distinct(%QueryExpr{expr: _}, _sources, _query), do: "DISTINCT " + defp distinct(nil, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%QueryExpr{expr: false}, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: exprs}, _sources, query) when is_list(exprs) do + raise Ecto.QueryError, query: query, message: "DISTINCT with multiple columns is not supported by SQLite3" + end def select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do [ @@ -1114,23 +1117,12 @@ defmodule Ecto.Adapters.SQLite3.Connection do def order_by(%{order_bys: order_bys} = query, sources) do order_bys = Enum.flat_map(order_bys, & &1.expr) - distinct = Map.get(query, :distinct, nil) - - order_bys = if distinct do - order_by_concat(List.wrap(distinct.expr), order_bys) - else - order_bys - end - [ " ORDER BY " | intersperse_map(order_bys, ", ", &order_by_expr(&1, sources, query)) ] end - defp order_by_concat([head | left], [head | right]), do: [head | order_by_concat(left, right)] - defp order_by_concat(left, right), do: left ++ right - defp order_by_expr({dir, expression}, sources, query) do str = expr(expression, sources, query) @@ -1404,7 +1396,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do {modifier, args} = case args do - [rest, :distinct] -> {"DISTINCT ", [rest]} + [_rest, :distinct] -> + raise Ecto.QueryError, + query: query, + message: "Distinct not supported in expressions" + _ -> {[], args} end @@ -1444,10 +1440,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do ["CAST(", expr(other, sources, query), " AS ", column_type(type, query), ?)] end - def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do - ["CAST(", expr(other, sources, query), " AS ", column_type(type, query), ?)] - end - def expr(nil, _sources, _query), do: "NULL" def expr(true, _sources, _query), do: "1" def expr(false, _sources, _query), do: "0" diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs index 06e4553..bdbce8b 100644 --- a/test/ecto/adapters/sqlite3/pg_connection_test.exs +++ b/test/ecto/adapters/sqlite3/pg_connection_test.exs @@ -305,8 +305,10 @@ defmodule Ecto.Adapters.SQLite3Test do query = Schema |> select([r], count(r.x)) |> plan() assert all(query) == ~s{SELECT count(s0."x") FROM "schema" AS s0} - query = Schema |> select([r], count(r.x, :distinct)) |> plan() - assert all(query) == ~s{SELECT count(DISTINCT s0."x") FROM "schema" AS s0} + assert_raise Ecto.QueryError, fn -> + query = Schema |> select([r], count(r.x, :distinct)) |> plan() + all(query) + end query = Schema |> select([r], count()) |> plan() assert all(query) == ~s{SELECT count(*) FROM "schema" AS s0} @@ -324,24 +326,6 @@ defmodule Ecto.Adapters.SQLite3Test do end test "distinct" do - query = Schema |> distinct([r], r.x) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct([r], desc: r.x) |> select([r], {r.x, r.y}) |> plan() 
- assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct([r], 2) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0} - - query = Schema |> distinct([r], [r.x, r.y]) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct([r], [asc: r.x, desc: r.y]) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct([r], [asc_nulls_first: r.x, desc_nulls_last: r.y]) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - query = Schema |> distinct([r], true) |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} @@ -353,26 +337,11 @@ defmodule Ecto.Adapters.SQLite3Test do query = Schema |> distinct(false) |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - end - test "distinct with order by" do - query = Schema |> order_by([r], [r.y]) |> distinct([r], desc: r.x) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC, s0."y"} - - query = Schema |> order_by([r], [r.y]) |> distinct([r], desc_nulls_last: r.x) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC NULLS LAST, s0."y"} - - # Duplicates - query = Schema |> order_by([r], desc: r.x) |> distinct([r], desc: r.x) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC} - - assert Schema - |> order_by([r], desc: r.x) - |> distinct([r], desc: r.x) - |> select([r], r.x) - |> plan() - |> all() == - ~s{SELECT DISTINCT s0."x" FROM "schema" AS s0 ORDER BY s0."x" DESC} + assert_raise Ecto.QueryError, ~r"DISTINCT with multiple columns is not supported by SQLite3", fn -> + query = Schema |> distinct([r], [r.x, r.y]) |> select([r], {r.x, r.y}) |> plan() + all(query) + end end test "coalesce" do @@ -895,7 +864,7 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{UPDATE "schema" AS s0 SET "x" = ? WHERE (s0."x" = ?) RETURNING "x" = ?} end - # TODO We don;t have actual arrays so probably no? 
+ # TODO With JSON array stuff test "update all array ops" do query = from(m in Schema, update: [push: [w: 0]]) |> plan(:update_all) assert_raise Ecto.QueryError, fn -> From f94d4e62f830c8999b9894add63a25dd900894b5 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Tue, 24 Jan 2023 14:11:07 -0600 Subject: [PATCH 06/17] more fixes --- lib/ecto/adapters/sqlite3/connection.ex | 41 +++++--- .../ecto/adapters/sqlite3/connection_test.exs | 2 +- .../adapters/sqlite3/pg_connection_test.exs | 97 ++++++++++--------- test/ecto/adapters/sqlite3_test.exs | 2 +- 4 files changed, 80 insertions(+), 62 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index cdb3479..ea87ec1 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -375,6 +375,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do raise ArgumentError, "SQLite3 adapter does not support keyword lists in :options" end + @impl true + def execute_ddl({_command, %Table{comment: comment}, _}) when not is_nil(comment) do + raise ArgumentError, "SQLite3 adapter does not support comments" + end + @impl true def execute_ddl({:create, %Table{} = table, columns}) do {table, composite_pk_def} = composite_pk_definition(table, columns) @@ -463,10 +468,15 @@ defmodule Ecto.Adapters.SQLite3.Connection do end) end + @impl true + def execute_ddl({_, %Index{comment: c}}) when not is_nil(c) do + raise ArgumentError, "comment is not supported with SQLite3" + end + @impl true def execute_ddl({_, %Index{concurrently: true}}) do raise ArgumentError, "`concurrently` is not supported with SQLite3" - end + end @impl true def execute_ddl({_, %Index{only: true}}) do @@ -986,6 +996,18 @@ defmodule Ecto.Adapters.SQLite3.Connection do ] end + defp update_op(:push, quoted_key, value, sources, query) do + [ + quoted_key, " = JSON_INSERT(", quoted_key, ",'$[#]',", expr(value, sources, query), ?) + ] + end + + defp update_op(:pull, _quoted_key, _value, _sources, query) do + raise Ecto.QueryError, + query: query, + message: "pull is not supported for SQLite3, if you can figure out a way to do with JSON array's please pull request into ecto_sqlite3." + end + defp update_op(command, _quoted_key, _value, _sources, query) do raise Ecto.QueryError, query: query, @@ -1000,15 +1022,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do %JoinExpr{qual: _qual, ix: ix, source: source} -> {join, name} = get_source(query, sources, ix, source) [join, " AS " | name] - - # This is hold over from sqlite_ecto2. According to sqlite3 - # documentation, all of the join types are allowed. 
- # - # %JoinExpr{qual: qual} -> - # raise Ecto.QueryError, - # query: query, - # message: - # "SQLite3 adapter supports only inner joins on #{kind}, got: `#{qual}`" end) wheres = @@ -1415,10 +1428,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do end # TODO It technically is, its just a json array, so we *could* support it - def expr(list, _sources, query) when is_list(list) do - raise Ecto.QueryError, - query: query, - message: "Array type is not supported by SQLite3" + def expr(list, _sources, _query) when is_list(list) do + library = Application.get_env(:ecto_sqlite3, :json_library, Jason) + expression = IO.iodata_to_binary(library.encode_to_iodata!(list)) + ["JSON_ARRAY('", expression, "')"] end def expr(%Decimal{} = decimal, _sources, _query) do diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index 1dc3c9f..b6746c6 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -336,7 +336,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end test "common table expression delete_all" do - cte_query = from(x in Schema, order_by: [asc: :id], limit: 10, select: %{id: x.id}) + cte_query = from(x in Schema, order_by: [asc: :id], inner_join: q in Schema2, on: x.x == q.z, limit: 10, select: %{id: x.id}) query = Schema diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs index bdbce8b..15ecec8 100644 --- a/test/ecto/adapters/sqlite3/pg_connection_test.exs +++ b/test/ecto/adapters/sqlite3/pg_connection_test.exs @@ -212,7 +212,7 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} end - # TODO should we warn about locks? or yell? + # TODO should error on lock test "CTE update_all" do cte_query = from(x in Schema, order_by: [asc: :id], limit: 10, lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id}) @@ -235,28 +235,22 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{RETURNING "id", "x", "y", "z", "w", "meta"} end - # TODO Joins not supported with SQLite - """ test "CTE delete_all" do cte_query = - from(x in Schema, order_by: [asc: :id], limit: 10, lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id}) + from(x in Schema, order_by: [asc: :id], limit: 10, inner_join: q in Schema2, on: x.x == q.z, select: %{id: x.id}) query = Schema |> with_cte("target_rows", as: ^cte_query) - |> join(:inner, [row], target in "target_rows", on: target.id == row.id) |> select([r, t], r) |> plan(:delete_all) assert delete_all(query) == ~s{WITH "target_rows" AS } <> - ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10 FOR UPDATE SKIP LOCKED) } <> + ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 INNER JOIN "schema2" AS ss1 ON ss0."x" = ss1."z" ORDER BY ss0."id" LIMIT 10) } <> ~s{DELETE FROM "schema" AS s0 } <> - ~s{USING "target_rows" AS t1 } <> - ~s{WHERE (t1."id" = s0."id") } <> ~s{RETURNING "id", "x", "y", "z", "w", "meta"} end - """ test "parent binding subquery and CTE" do initial_query = @@ -591,7 +585,6 @@ defmodule Ecto.Adapters.SQLite3Test do query = Schema |> select([], type(^"601d74e4-a8d3-4b6e-8365-eddb4c893327", Ecto.UUID)) |> plan() assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} - # TODO Arrays not supported? query = Schema |> select([], type(^["601d74e4-a8d3-4b6e-8365-eddb4c893327"], {:array, Ecto.UUID})) |> plan() assert all(query) == ~s{SELECT CAST(? 
AS TEXT) FROM "schema" AS s0} end @@ -733,8 +726,6 @@ defmodule Ecto.Adapters.SQLite3Test do end - # TODO Add Array support via json - """ test "arrays and sigils" do query = Schema |> select([], fragment("?", [1, 2, 3])) |> plan() assert all(query) == ~s{SELECT JSON_ARRAY('[1,2,3]') FROM "schema" AS s0} @@ -743,9 +734,8 @@ defmodule Ecto.Adapters.SQLite3Test do assert all(query) == ~s{SELECT JSON_ARRAY('["abc","def"]') FROM "schema" AS s0} query = Schema |> where([s], s.w == []) |> select([s], s.w) |> plan() - assert all(query) == ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = '\{\}')} + assert all(query) == ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = JSON_ARRAY('[]'))} end - """ test "interpolated values" do @@ -864,12 +854,9 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{UPDATE "schema" AS s0 SET "x" = ? WHERE (s0."x" = ?) RETURNING "x" = ?} end - # TODO With JSON array stuff test "update all array ops" do query = from(m in Schema, update: [push: [w: 0]]) |> plan(:update_all) - assert_raise Ecto.QueryError, fn -> - update_all(query) - end + assert update_all(query) == ~s{UPDATE "schema" AS s0 SET "w" = JSON_INSERT("w",'$[#]',0)} query = from(m in Schema, update: [pull: [w: 0]]) |> plan(:update_all) assert_raise Ecto.QueryError, fn -> @@ -907,17 +894,13 @@ defmodule Ecto.Adapters.SQLite3Test do ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} end + # TODO this is broken? test "update all with left join" do query = from(m in Schema, join: x in assoc(m, :comments), left_join: p in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) assert update_all(query) == ~s{UPDATE "schema" AS s0 SET "w" = s0."list2" FROM "schema2" AS s1 LEFT OUTER JOIN "schema3" AS s2 ON s2."id" = s0."y" WHERE (s1."z" = s0."x")} end - test "update all with left join but no inner join" do - query = from(m in Schema, left_join: p in assoc(m, :permalink), left_join: x in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) - assert update_all(query) == ~s{select} - end - test "delete all" do query = Schema |> Queryable.to_query |> plan() assert delete_all(query) == ~s{DELETE FROM "schema" AS s0} @@ -1379,17 +1362,30 @@ defmodule Ecto.Adapters.SQLite3Test do """ |> remove_newlines] end - # TODO should we raise on comment? - test "create table with comment on columns" do + test "raise on table comment" do create = {:create, table(:posts, comment: "comment"), [ {:add, :category_0, %Reference{table: :categories}, [comment: "column comment"]}, {:add, :created_at, :timestamp, []}, {:add, :updated_at, :timestamp, [comment: "column comment 2"]} ]} + + assert_raise ArgumentError, ~r/comment/, fn -> + execute_ddl(create) + end + end + + # TODO should we raise on comment? 
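+  # Note: only the table-level comment raises at the moment (via the new
+  # execute_ddl/1 clause matching %Table{comment: comment} in connection.ex);
+  # column-level comments still appear to be silently ignored, which is what
+  # the TODO above is asking about, and the test below was reduced to plain
+  # column DDL accordingly.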
+ test "create table with comment on columns" do + create = {:create, table(:posts), + [ + {:add, :category_0, %Reference{table: :categories}, []}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, []} + ]} assert execute_ddl(create) == [remove_newlines(""" - CREATE TABLE "posts" - ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), "created_at" TEXT, "updated_at" TEXT) + CREATE TABLE "posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), "created_at" TEXT, "updated_at" TEXT) """) ] end @@ -1603,21 +1599,6 @@ defmodule Ecto.Adapters.SQLite3Test do ] end - test "alter table with comments on table and columns, ignore comments" do - alter = {:alter, table(:posts, comment: "table comment"), - [{:add, :title, :string, [default: "Untitled", size: 100, null: false, comment: "column comment"]}, - {:remove, :summary} - ]} - - assert execute_ddl(alter) == [remove_newlines(""" - ALTER TABLE "posts" - ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL - """), remove_newlines(""" - ALTER TABLE "posts" - DROP COLUMN "summary" - """)] - end - test "alter table with prefix" do alter = {:alter, table(:posts, prefix: :foo), [{:add, :author_id, %Reference{table: :author}, []}]} @@ -1668,8 +1649,15 @@ defmodule Ecto.Adapters.SQLite3Test do [~s|CREATE INDEX "posts$main" ON "foo"."posts" (lower(permalink))|] end - test "create index with comment" do + test "raise on create index with comment" do create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")} + assert_raise ArgumentError, ~r/comment/, fn -> + execute_ddl(create) + end + end + + test "create index with comment" do + create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} assert execute_ddl(create) == [remove_newlines(""" CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink") """)] @@ -1691,7 +1679,6 @@ defmodule Ecto.Adapters.SQLite3Test do [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public|] end - # TODO Not Supported? 
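+  # `include:` (covering indexes) has no SQLite3 equivalent, so execute_ddl/1
+  # now raises ArgumentError for any %Index{include: [...]} rather than
+  # silently dropping the option.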
test "create index with include fields" do create = {:create, index(:posts, [:permalink], unique: true, include: [:public])} assert_raise ArgumentError, fn -> @@ -1710,14 +1697,14 @@ defmodule Ecto.Adapters.SQLite3Test do index = index(:posts, [:permalink]) create = {:create, %{index | concurrently: true}} assert_raise ArgumentError, fn -> - execute_ddl(create) + execute_ddl(create) end end test "create an index using a different type" do create = {:create, index(:posts, [:permalink], using: :hash)} assert_raise ArgumentError, fn -> - execute_ddl(create) + execute_ddl(create) end end @@ -1797,6 +1784,24 @@ defmodule Ecto.Adapters.SQLite3Test do assert execute_ddl(rename) == [~s|ALTER TABLE "foo"."posts" RENAME COLUMN "given_name" TO "first_name"|] end + test "autoincrement support" do + serial = {:create, table(:posts), [{:add, :id, :serial, [primary_key: true]}]} + bigserial = {:create, table(:posts), [{:add, :id, :bigserial, [primary_key: true]}]} + id = {:create, table(:posts), [{:add, :id, :id, [primary_key: true]}]} + integer = {:create, table(:posts), [{:add, :id, :integer, [primary_key: true]}]} + + assert execute_ddl(serial) == [ + ~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT)/ + ] + + assert execute_ddl(bigserial) == [ + ~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT)/ + ] + + assert execute_ddl(id) == [~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY)/] + assert execute_ddl(integer) == [~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY)/] + end + defp remove_newlines(string) do string |> String.trim |> String.replace("\n", " ") end diff --git a/test/ecto/adapters/sqlite3_test.exs b/test/ecto/adapters/sqlite3_test.exs index 6144f7f..20b0b1a 100644 --- a/test/ecto/adapters/sqlite3_test.exs +++ b/test/ecto/adapters/sqlite3_test.exs @@ -1,4 +1,4 @@ -defmodule Ecto.Adapters.SQLite3Test do +defmodule Ecto.Adapters.SQLite3ConnTest do use ExUnit.Case alias Ecto.Adapters.SQLite3 From a690d122d41bcecb4f0e571ec1363a54258568e3 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Tue, 24 Jan 2023 15:26:01 -0600 Subject: [PATCH 07/17] almost working before I remove array support --- lib/ecto/adapters/sqlite3.ex | 22 +- lib/ecto/adapters/sqlite3/connection.ex | 71 +- lib/ecto/adapters/sqlite3/data_type.ex | 6 +- .../ecto/adapters/sqlite3/connection_test.exs | 2656 +++++++---------- .../adapters/sqlite3/pg_connection_test.exs | 1808 ----------- test/ecto/integration/crud_test.exs | 2 +- 6 files changed, 1202 insertions(+), 3363 deletions(-) delete mode 100644 test/ecto/adapters/sqlite3/pg_connection_test.exs diff --git a/lib/ecto/adapters/sqlite3.ex b/lib/ecto/adapters/sqlite3.ex index 9da7400..5b575e7 100644 --- a/lib/ecto/adapters/sqlite3.ex +++ b/lib/ecto/adapters/sqlite3.ex @@ -322,18 +322,18 @@ defmodule Ecto.Adapters.SQLite3 do end @impl Ecto.Adapter - def loaders({:map, _}, type) do - [&Codec.json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] + def loaders({:array, _}, type) do + [&Codec.json_decode/1, type] end @impl Ecto.Adapter - def loaders({:array, _}, type) do - [&Codec.json_decode/1, type] + def loaders({:map, _}, type) do + [&Codec.json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] end @impl Ecto.Adapter def loaders(:map, type) do - [&Codec.json_decode/1, type] + [&Codec.json_decode/1, type] end @impl Ecto.Adapter @@ -439,18 +439,20 @@ defmodule Ecto.Adapters.SQLite3 do [type, &Codec.naive_datetime_encode(&1, dt_type)] end - @impl Ecto.Adapter - def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def 
dumpers({:array, _}, type) do + [type, &Codec.json_encode/1] + end @impl Ecto.Adapter - def dumpers({:array, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}, &Codec.json_encode/1] @impl Ecto.Adapter - def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}] + def dumpers({:map, _}, type), do: [type, &Ecto.Type.embedded_dump(type, &1, :json)] @impl Ecto.Adapter - def dumpers(_, type), do: [type] + def dumpers(_, type), do: [type] + ## ## HELPERS diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index ea87ec1..a365a7aa 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -14,6 +14,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do alias Ecto.Query.WithExpr import Ecto.Adapters.SQLite3.DataType + alias Ecto.Adapters.SQLite3.Codec @parent_as __MODULE__ @@ -434,10 +435,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do def execute_ddl({:drop, %Table{} = table, mode}) do if mode != [] do raise ArgumentError, """ - `#{inspect(mode)}` is not supported for DROP TABLE with SQLite3 \ - DROP TABLE #{table.name} cannot have options set. - """ + `#{inspect(mode)}` is not supported for DROP TABLE with SQLite3 \ + DROP TABLE #{table.name} cannot have options set. + """ end + execute_ddl({:drop, table}) end @@ -486,7 +488,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do @impl true def execute_ddl({_, %Index{include: x}}) when length(x) != 0 do raise ArgumentError, "`include` is not supported with SQLite3" - end + end @impl true def execute_ddl({_, %Index{using: x}}) when not is_nil(x) do @@ -498,7 +500,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do raise ArgumentError, "`nulls_distinct` is not supported with SQLite3" end - @impl true def execute_ddl({:create, %Index{} = index}) do fields = intersperse_map(index.columns, ", ", &index_expr/1) @@ -553,10 +554,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do def execute_ddl({:drop, %Index{} = index, mode}) do if mode != [] do raise ArgumentError, """ - `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ - DROP INDEX #{index.name} cannot have options set. - """ + `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ + DROP INDEX #{index.name} cannot have options set. + """ end + execute_ddl({:drop, index}) end @@ -579,10 +581,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do def execute_ddl({:drop_if_exists, %Index{} = index, mode}) do if mode != [] do raise ArgumentError, """ - `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ - DROP INDEX #{index.name} cannot have options set. - """ + `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ + DROP INDEX #{index.name} cannot have options set. 
+ """ end + execute_ddl({:drop_if_exists, index}) end @@ -877,10 +880,13 @@ defmodule Ecto.Adapters.SQLite3.Connection do def handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} defp distinct(nil, _sources, _query), do: [] - defp distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " defp distinct(%QueryExpr{expr: false}, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: exprs}, _sources, query) when is_list(exprs) do - raise Ecto.QueryError, query: query, message: "DISTINCT with multiple columns is not supported by SQLite3" + raise Ecto.QueryError, + query: query, + message: "DISTINCT with multiple columns is not supported by SQLite3" end def select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do @@ -998,14 +1004,20 @@ defmodule Ecto.Adapters.SQLite3.Connection do defp update_op(:push, quoted_key, value, sources, query) do [ - quoted_key, " = JSON_INSERT(", quoted_key, ",'$[#]',", expr(value, sources, query), ?) + quoted_key, + " = JSON_INSERT(", + quoted_key, + ",'$[#]',", + expr(value, sources, query), + ?) ] end defp update_op(:pull, _quoted_key, _value, _sources, query) do raise Ecto.QueryError, query: query, - message: "pull is not supported for SQLite3, if you can figure out a way to do with JSON array's please pull request into ecto_sqlite3." + message: + "pull is not supported for SQLite3, if you can figure out a way to do with JSON array's please pull request into ecto_sqlite3." end defp update_op(command, _quoted_key, _value, _sources, query) do @@ -1307,6 +1319,19 @@ defmodule Ecto.Adapters.SQLite3.Connection do [expr(left, sources, query), " IN ", expr(subquery, sources, query)] end + # Super Hack to handle arrays in json + def expr({:in, a, [left,"["<> _ = right]}, sources, query) do + case Codec.json_decode(right) do + {:ok, arr} -> + expr({:in, a, [left, arr]}, sources, query) + _ -> + raise Ecto.QueryError, + query: query, + message: "Malformed query on right hand side of #{right} in." 
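+          # This branch fires when the right-hand side text (presumably the
+          # JSON produced by the {:in, sub} dumper for pinned lists) does not
+          # decode back into a list, so the parameter is treated as malformed.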
+ + end + end + def expr({:is_nil, _, [arg]}, sources, query) do [expr(arg, sources, query) | " IS NULL"] end @@ -1409,12 +1434,13 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do {modifier, args} = case args do - [_rest, :distinct] -> - raise Ecto.QueryError, + [_rest, :distinct] -> + raise Ecto.QueryError, query: query, message: "Distinct not supported in expressions" - - _ -> {[], args} + + _ -> + {[], args} end case handle_call(fun, length(args)) do @@ -1427,7 +1453,11 @@ defmodule Ecto.Adapters.SQLite3.Connection do end end - # TODO It technically is, its just a json array, so we *could* support it + # Hack cause I can't get arrays to work + def expr("[" <> _ = list, _sources, _query) do + ["JSON_ARRAY('", list, "')"] + end + def expr(list, _sources, _query) when is_list(list) do library = Application.get_env(:ecto_sqlite3, :json_library, Jason) expression = IO.iodata_to_binary(library.encode_to_iodata!(list)) @@ -1457,7 +1487,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr(true, _sources, _query), do: "1" def expr(false, _sources, _query), do: "0" - def expr(literal, _sources, _query) when is_binary(literal) do [?', escape_string(literal), ?'] end diff --git a/lib/ecto/adapters/sqlite3/data_type.ex b/lib/ecto/adapters/sqlite3/data_type.ex index 9713d37..f4f5cc8 100644 --- a/lib/ecto/adapters/sqlite3/data_type.ex +++ b/lib/ecto/adapters/sqlite3/data_type.ex @@ -62,8 +62,8 @@ defmodule Ecto.Adapters.SQLite3.DataType do end def column_type(type, _) do - raise ArgumentError, - "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> - "or a tuple of the form `{:map, t}` or `{:array, t}` where `t` itself follows the same conditions." + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` or `{:array, t}` where `t` itself follows the same conditions." end end diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index b6746c6..80236e6 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -1,41 +1,10 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do - use ExUnit.Case - - alias Ecto.Adapters.SQLite3 - alias Ecto.Adapters.SQLite3.Connection - alias Ecto.Migration.Reference + use ExUnit.Case, async: false import Ecto.Query - import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, constraint: 3] - - defmodule Comment do - use Ecto.Schema - - schema "comments" do - field(:content, :string) - end - end - - defmodule Post do - use Ecto.Schema - - schema "posts" do - field(:title, :string) - field(:content, :string) - has_many(:comments, Comment) - end - end - - # TODO: Let's rename these or make them more concrete and less terse so that - # tests are easier to read and understand what is happening. 
- # @warmwaffles 2021-03-11 - defmodule Schema3 do - use Ecto.Schema - schema "schema3" do - field(:binary, :binary) - end - end + alias Ecto.Queryable + alias Ecto.Adapters.SQLite3.Connection, as: SQL defmodule Schema do use Ecto.Schema @@ -44,6 +13,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do field(:x, :integer) field(:y, :integer) field(:z, :integer) + field(:w, {:array, :integer}) field(:meta, :map) has_many(:comments, Ecto.Adapters.SQLite3.ConnectionTest.Schema2, @@ -69,47 +39,42 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end end - defp plan(query, operation \\ :all) do - {query, _cast_params, _dump_params} = - Ecto.Adapter.Queryable.plan_query(operation, SQLite3, query) + defmodule Schema3 do + use Ecto.Schema - query + schema "schema3" do + field(:list1, {:array, :string}) + field(:list2, {:array, :integer}) + field(:binary, :binary) + end end - defp all(query) do - query - |> Connection.all() - |> IO.iodata_to_binary() - end + defp plan(query, operation \\ :all) do + {query, _cast_params, _dump_params} = + Ecto.Adapter.Queryable.plan_query(operation, Ecto.Adapters.SQLite3, query) - defp update_all(query) do query - |> Connection.update_all() - |> IO.iodata_to_binary() end - defp delete_all(query) do - query - |> Connection.delete_all() - |> IO.iodata_to_binary() - end + defp all(query), do: query |> SQL.all() |> IO.iodata_to_binary() + defp update_all(query), do: query |> SQL.update_all() |> IO.iodata_to_binary() + defp delete_all(query), do: query |> SQL.delete_all() |> IO.iodata_to_binary() - defp execute_ddl(query) do - query - |> Connection.execute_ddl() - |> Enum.map(&IO.iodata_to_binary/1) + defp execute_ddl(query), + do: query |> SQL.execute_ddl() |> Enum.map(&IO.iodata_to_binary/1) + + defp insert(prefx, table, header, rows, on_conflict, returning, placeholders \\ []) do + IO.iodata_to_binary( + SQL.insert(prefx, table, header, rows, on_conflict, returning, placeholders) + ) end - defp insert(prefix, table, header, rows, on_conflict, returning, placeholders \\ []) do - prefix - |> Connection.insert(table, header, rows, on_conflict, returning, placeholders) - |> IO.iodata_to_binary() + defp update(prefx, table, fields, filter, returning) do + IO.iodata_to_binary(SQL.update(prefx, table, fields, filter, returning)) end - defp delete(prefix, table, filter, returning) do - prefix - |> Connection.delete(table, filter, returning) - |> IO.iodata_to_binary() + defp delete(prefx, table, filter, returning) do + IO.iodata_to_binary(SQL.delete(prefx, table, filter, returning)) end test "from" do @@ -118,113 +83,79 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end test "from with hints" do - query = - Schema - |> from(hints: ["INDEXED BY FOO", "INDEXED BY BAR"]) - |> select([r], r.x) - |> plan() - - assert all(query) == - ~s{SELECT s0."x" FROM "schema" AS s0 INDEXED BY FOO INDEXED BY BAR} + query = Schema |> from(hints: "INDEXED BY FOO") |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 INDEXED BY FOO} end test "from without schema" do - query = - "posts" - |> select([r], r.x) - |> plan() - + query = "posts" |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT p0."x" FROM "posts" AS p0} - query = - "posts" - |> select([r], fragment("?", r)) - |> plan() - + query = "posts" |> select([r], fragment("?", r)) |> plan() assert all(query) == ~s{SELECT p0 FROM "posts" AS p0} - query = - "Posts" - |> select([:x]) - |> plan() - + query = "Posts" |> select([:x]) |> plan() assert all(query) == ~s{SELECT P0."x" FROM "Posts" 
AS P0} - query = - "0posts" - |> select([:x]) - |> plan() - + query = "0posts" |> select([:x]) |> plan() assert all(query) == ~s{SELECT t0."x" FROM "0posts" AS t0} - assert_raise( - Ecto.QueryError, - ~r{SQLite3 does not support selecting all fields from "posts" without a schema}, - fn -> - from(p in "posts", select: p) |> plan() |> all() - end - ) + assert_raise Ecto.QueryError, + ~r"SQLite3 does not support selecting all fields from \"posts\" without a schema", + fn -> + all(from(p in "posts", select: p) |> plan()) + end end test "from with subquery" do query = - "posts" - |> select([r], %{x: r.x, y: r.y}) - |> subquery() - |> select([r], r.x) - |> plan() + subquery("posts" |> select([r], %{x: r.x, y: r.y})) |> select([r], r.x) |> plan() - assert all(query) == """ - SELECT s0."x" \ - FROM (SELECT sp0."x" AS "x", sp0."y" AS "y" FROM "posts" AS sp0) AS s0\ - """ + assert all(query) == + ~s{SELECT s0."x" FROM (SELECT sp0."x" AS "x", sp0."y" AS "y" FROM "posts" AS sp0) AS s0} query = - "posts" - |> select([r], %{x: r.x, z: r.y}) - |> subquery() - |> select([r], r) - |> plan() + subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r) |> plan() assert all(query) == - """ - SELECT s0."x", s0."z" \ - FROM (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0) AS s0\ - """ + ~s{SELECT s0."x", s0."z" FROM (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0) AS s0} query = - "posts" - |> select([r], %{x: r.x, z: r.y}) - |> subquery() - |> select([r], r) - |> subquery() + subquery(subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r)) |> select([r], r) |> plan() assert all(query) == - """ - SELECT s0."x", s0."z" \ - FROM (\ - SELECT ss0."x" AS "x", ss0."z" AS "z" \ - FROM (\ - SELECT ssp0."x" AS "x", ssp0."y" AS "z" \ - FROM "posts" AS ssp0\ - ) AS ss0\ - ) AS s0\ - """ + ~s{SELECT s0."x", s0."z" FROM (SELECT ss0."x" AS "x", ss0."z" AS "z" FROM (SELECT ssp0."x" AS "x", ssp0."y" AS "z" FROM "posts" AS ssp0) AS ss0) AS s0} end - test "common table expression" do + test "from with fragment" do + query = from(f in fragment("select ? as x", ^"abc"), select: f.x) |> plan() + assert all(query) == ~s{SELECT f0."x" FROM (select ? as x) AS f0} + + query = from(fragment("select ? as x", ^"abc"), select: fragment("x")) |> plan() + assert all(query) == ~s{SELECT x FROM (select ? as x) AS f0} + + query = from(f in fragment("select_rows(arg)"), select: f.x) |> plan() + assert all(query) == ~s{SELECT f0."x" FROM (select_rows(arg)) AS f0} + + assert_raise Ecto.QueryError, ~r/^SQLite3 does not support/, fn -> + all(from(f in fragment("select ? as x", ^"abc"), select: f) |> plan()) + end + end + + test "CTE" do + initial_query = + "categories" + |> where([c], is_nil(c.parent_id)) + |> select([c], %{id: c.id, depth: fragment("1")}) + iteration_query = "categories" |> join(:inner, [c], t in "tree", on: t.id == c.parent_id) |> select([c, t], %{id: c.id, depth: fragment("? 
+ 1", t.depth)}) - cte_query = - "categories" - |> where([c], is_nil(c.parent_id)) - |> select([c], %{id: c.id, depth: fragment("1")}) - |> union_all(^iteration_query) + cte_query = initial_query |> union_all(^iteration_query) query = Schema @@ -235,19 +166,23 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - WITH RECURSIVE "tree" AS \ - (SELECT sc0."id" AS "id", 1 AS "depth" FROM "categories" AS sc0 WHERE (sc0."parent_id" IS NULL) \ - UNION ALL \ - SELECT c0."id", t1."depth" + 1 FROM "categories" AS c0 \ - INNER JOIN "tree" AS t1 ON t1."id" = c0."parent_id") \ - SELECT s0."x", t1."id", CAST(t1."depth" AS INTEGER) \ - FROM "schema" AS s0 \ - INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"\ - """ + ~s{WITH RECURSIVE "tree" AS } <> + ~s{(SELECT sc0."id" AS "id", 1 AS "depth" FROM "categories" AS sc0 WHERE (sc0."parent_id" IS NULL) } <> + ~s{UNION ALL } <> + ~s{(SELECT c0."id", t1."depth" + 1 FROM "categories" AS c0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = c0."parent_id")) } <> + ~s{SELECT s0."x", t1."id", CAST(t1."depth" AS INTEGER) } <> + ~s{FROM "schema" AS s0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} end - test "reference common table in union" do + @raw_sql_cte """ + SELECT * FROM categories WHERE c.parent_id IS NULL + UNION ALL + SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id + """ + + test "reference CTE in union" do comments_scope_query = "comments" |> where([c], is_nil(c.deleted_at)) @@ -270,27 +205,19 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - WITH "comments_scope" AS (\ - SELECT sc0."entity_id" AS "entity_id", sc0."text" AS "text" \ - FROM "comments" AS sc0 WHERE (sc0."deleted_at" IS NULL)) \ - SELECT p0."title", c1."text" \ - FROM "posts" AS p0 \ - INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = p0."guid" \ - UNION ALL \ - SELECT v0."title", c1."text" \ - FROM "videos" AS v0 \ - INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = v0."guid"\ - """ + ~s{WITH "comments_scope" AS (} <> + ~s{SELECT sc0."entity_id" AS "entity_id", sc0."text" AS "text" } <> + ~s{FROM "comments" AS sc0 WHERE (sc0."deleted_at" IS NULL)) } <> + ~s{SELECT p0."title", c1."text" } <> + ~s{FROM "posts" AS p0 } <> + ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = p0."guid" } <> + ~s{UNION ALL } <> + ~s{(SELECT v0."title", c1."text" } <> + ~s{FROM "videos" AS v0 } <> + ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = v0."guid")} end - @raw_sql_cte """ - SELECT * FROM categories WHERE c.parent_id IS NULL \ - UNION ALL \ - SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id\ - """ - - test "fragment common table expression" do + test "fragment CTE" do query = Schema |> recursive_ctes(true) @@ -300,20 +227,19 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - WITH RECURSIVE "tree" AS (#{@raw_sql_cte}) \ - SELECT s0."x" \ - FROM "schema" AS s0 \ - INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"\ - """ + ~s{WITH RECURSIVE "tree" AS (#{@raw_sql_cte}) } <> + ~s{SELECT s0."x" } <> + ~s{FROM "schema" AS s0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} end - test "common table expression update_all" do + # TODO should error on lock + test "CTE update_all" do cte_query = - from( - x in Schema, + from(x in Schema, order_by: [asc: :id], limit: 10, + lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id} ) @@ -321,68 +247,98 @@ defmodule 
Ecto.Adapters.SQLite3.ConnectionTest do Schema |> with_cte("target_rows", as: ^cte_query) |> join(:inner, [row], target in "target_rows", on: target.id == row.id) + |> select([r, t], r) |> update(set: [x: 123]) |> plan(:update_all) assert update_all(query) == - """ - WITH "target_rows" AS \ - (SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10) \ - UPDATE "schema" AS s0 \ - SET "x" = 123 \ - FROM "target_rows" AS t1 \ - WHERE (t1."id" = s0."id")\ - """ + ~s{WITH "target_rows" AS } <> + ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10) } <> + ~s{UPDATE "schema" AS s0 } <> + ~s{SET "x" = 123 } <> + ~s{FROM "target_rows" AS t1 } <> + ~s{WHERE (t1."id" = s0."id") } <> + ~s{RETURNING "id", "x", "y", "z", "w", "meta"} end - test "common table expression delete_all" do - cte_query = from(x in Schema, order_by: [asc: :id], inner_join: q in Schema2, on: x.x == q.z, limit: 10, select: %{id: x.id}) + test "CTE delete_all" do + cte_query = + from(x in Schema, + order_by: [asc: :id], + limit: 10, + inner_join: q in Schema2, + on: x.x == q.z, + select: %{id: x.id} + ) query = Schema |> with_cte("target_rows", as: ^cte_query) + |> select([r, t], r) |> plan(:delete_all) - # TODO: This is valid in sqlite - # https://sqlite.org/lang_delete.html assert delete_all(query) == - """ - WITH "target_rows" AS \ - (SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10) \ - DELETE \ - FROM "schema" AS s0\ - """ + ~s{WITH "target_rows" AS } <> + ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 INNER JOIN "schema2" AS ss1 ON ss0."x" = ss1."z" ORDER BY ss0."id" LIMIT 10) } <> + ~s{DELETE FROM "schema" AS s0 } <> + ~s{RETURNING "id", "x", "y", "z", "w", "meta"} end - test "select" do - query = - Schema - |> select([r], {r.x, r.y}) - |> plan() + test "parent binding subquery and CTE" do + initial_query = + "categories" + |> where([c], c.id == parent_as(:parent_category).id) + |> select([:id, :parent_id]) - assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + iteration_query = + "categories" + |> join(:inner, [c], t in "tree", on: t.parent_id == c.id) + |> select([:id, :parent_id]) + + cte_query = initial_query |> union_all(^iteration_query) + + breadcrumbs_query = + "tree" + |> recursive_ctes(true) + |> with_cte("tree", as: ^cte_query) + |> select([t], %{breadcrumbs: fragment("STRING_AGG(?, ' / ')", t.id)}) query = - Schema - |> select([r], [r.x, r.y]) + from(c in "categories", + as: :parent_category, + left_lateral_join: b in subquery(breadcrumbs_query), + select: %{id: c.id, breadcrumbs: b.breadcrumbs} + ) |> plan() + assert_raise Ecto.QueryError, + ~r/join `:left_lateral` not supported by SQLite3/, + fn -> + all(query) + end + end + + test "select" do + query = Schema |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - query = - Schema - |> select([r], struct(r, [:x, :y])) - |> plan() + query = Schema |> select([r], [r.x, r.y]) |> plan() + assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} + query = Schema |> select([r], struct(r, [:x, :y])) |> plan() assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} end test "aggregates" do - query = - Schema - |> select(count()) - |> plan() + query = Schema |> select([r], count(r.x)) |> plan() + assert all(query) == ~s{SELECT count(s0."x") FROM "schema" AS s0} + + assert_raise Ecto.QueryError, fn -> + query = Schema |> select([r], count(r.x, :distinct)) |> plan() + all(query) + end + query = Schema |> select([r], 
count()) |> plan() assert all(query) == ~s{SELECT count(*) FROM "schema" AS s0} end @@ -404,57 +360,33 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end test "distinct" do - query = - Schema - |> distinct([r], true) - |> select([r], {r.x, r.y}) - |> plan() - + query = Schema |> distinct([r], true) |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - query = - Schema - |> distinct([r], false) - |> select([r], {r.x, r.y}) - |> plan() - + query = Schema |> distinct([r], false) |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - query = - Schema - |> distinct(true) - |> select([r], {r.x, r.y}) - |> plan() - + query = Schema |> distinct(true) |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - query = - Schema - |> distinct(false) - |> select([r], {r.x, r.y}) - |> plan() - + query = Schema |> distinct(false) |> select([r], {r.x, r.y}) |> plan() assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - assert_raise( - Ecto.QueryError, - ~r"DISTINCT with multiple columns is not supported by SQLite3", - fn -> - Schema - |> distinct([r], [r.x, r.y]) - |> select([r], {r.x, r.y}) - |> plan() - |> all() - end - ) + assert_raise Ecto.QueryError, + ~r"DISTINCT with multiple columns is not supported by SQLite3", + fn -> + query = + Schema + |> distinct([r], [r.x, r.y]) + |> select([r], {r.x, r.y}) + |> plan() + + all(query) + end end test "coalesce" do - query = - Schema - |> select([s], coalesce(s.x, 5)) - |> plan() - + query = Schema |> select([s], coalesce(s.x, 5)) |> plan() assert all(query) == ~s{SELECT coalesce(s0."x", 5) FROM "schema" AS s0} end @@ -469,11 +401,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE (s0."x" = 42) AND (s0."y" != 43)} - query = - Schema - |> where([r], {r.x, r.y} > {1, 2}) - |> select([r], r.x) - |> plan() + query = Schema |> where([r], {r.x, r.y} > {1, 2}) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE ((s0."x",s0."y") > (1,2))} @@ -503,39 +431,17 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end test "order by" do - query = - Schema - |> order_by([r], r.x) - |> select([r], r.x) - |> plan() - + query = Schema |> order_by([r], r.x) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x"} - query = - Schema - |> order_by([r], [r.x, r.y]) - |> select([r], r.x) - |> plan() - + query = Schema |> order_by([r], [r.x, r.y]) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x", s0."y"} - query = - Schema - |> order_by([r], asc: r.x, desc: r.y) - |> select([r], r.x) - |> plan() + query = Schema |> order_by([r], asc: r.x, desc: r.y) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x", s0."y" DESC} - query = - Schema - |> order_by([r], []) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} - query = Schema |> order_by([r], asc_nulls_first: r.x, desc_nulls_first: r.y) @@ -553,128 +459,73 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x" ASC NULLS LAST, s0."y" DESC NULLS LAST} + + query = Schema |> order_by([r], []) |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS 
s0} end test "union and union all" do base_query = - Schema - |> select([r], r.x) - |> order_by(fragment("rand")) - |> offset(10) - |> limit(5) + Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) union_query1 = - Schema - |> select([r], r.y) - |> order_by([r], r.y) - |> offset(20) - |> limit(40) + Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) union_query2 = - Schema - |> select([r], r.z) - |> order_by([r], r.z) - |> offset(30) - |> limit(60) + Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) - query = - base_query - |> union(^union_query1) - |> union(^union_query2) - |> plan() + query = base_query |> union(^union_query1) |> union(^union_query2) |> plan() assert all(query) == - """ - SELECT s0."x" FROM "schema" AS s0 \ - UNION SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 \ - UNION SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{UNION (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{UNION (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} - query = - base_query - |> union_all(^union_query1) - |> union_all(^union_query2) - |> plan() + query = base_query |> union_all(^union_query1) |> union_all(^union_query2) |> plan() assert all(query) == - """ - SELECT s0."x" FROM "schema" AS s0 \ - UNION ALL SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 \ - UNION ALL SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{UNION ALL (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{UNION ALL (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} end test "except and except all" do base_query = - Schema - |> select([r], r.x) - |> order_by(fragment("rand")) - |> offset(10) - |> limit(5) + Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) except_query1 = - Schema - |> select([r], r.y) - |> order_by([r], r.y) - |> offset(20) - |> limit(40) + Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) except_query2 = - Schema - |> select([r], r.z) - |> order_by([r], r.z) - |> offset(30) - |> limit(60) + Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) - query = - base_query - |> except(^except_query1) - |> except(^except_query2) - |> plan() + query = base_query |> except(^except_query1) |> except(^except_query2) |> plan() assert all(query) == - """ - SELECT s0."x" FROM "schema" AS s0 \ - EXCEPT SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 \ - EXCEPT SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{EXCEPT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{EXCEPT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} - assert_raise( - Ecto.QueryError, - fn -> - base_query - |> except_all(^except_query1) - |> except_all(^except_query2) - |> plan() - |> all() - end - ) + query = + base_query |> except_all(^except_query1) |> except_all(^except_query2) |> plan() + + assert_raise 
Ecto.QueryError, fn -> + all(query) + end end test "intersect and intersect all" do base_query = - Schema - |> select([r], r.x) - |> order_by(fragment("rand")) - |> offset(10) - |> limit(5) + Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) intersect_query1 = - Schema - |> select([r], r.y) - |> order_by([r], r.y) - |> offset(20) - |> limit(40) + Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) intersect_query2 = - Schema - |> select([r], r.z) - |> order_by([r], r.z) - |> offset(30) - |> limit(60) + Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) query = base_query @@ -683,308 +534,220 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s0."x" FROM "schema" AS s0 \ - INTERSECT SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 \ - INTERSECT SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ - - assert_raise( - Ecto.QueryError, - fn -> - base_query - |> intersect_all(^intersect_query1) - |> intersect_all(^intersect_query2) - |> plan() - |> all() - end - ) - end + ~s{SELECT s0."x" FROM "schema" AS s0 } <> + ~s{INTERSECT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> + ~s{INTERSECT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{ORDER BY rand LIMIT 5 OFFSET 10} - test "limit and offset" do query = - Schema - |> limit([r], 3) - |> select([], true) + base_query + |> intersect_all(^intersect_query1) + |> intersect_all(^intersect_query2) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3} + assert_raise Ecto.QueryError, fn -> + all(query) + end + end - query = - Schema - |> offset([r], 5) - |> select([], true) - |> plan() + test "limit and offset" do + query = Schema |> limit([r], 3) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3} + query = Schema |> offset([r], 5) |> select([], true) |> plan() assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 OFFSET 5} - query = - Schema - |> offset([r], 5) - |> limit([r], 3) - |> select([], true) - |> plan() - + query = Schema |> offset([r], 5) |> limit([r], 3) |> select([], true) |> plan() assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3 OFFSET 5} end test "lock" do - assert_raise( - ArgumentError, - "locks are not supported by SQLite3", - fn -> - Schema - |> lock("LOCK IN SHARE MODE") - |> select([], true) - |> plan() - |> all() - end - ) + assert_raise ArgumentError, "locks are not supported by SQLite3", fn -> + Schema |> lock("FOR SHARE NOWAIT") |> select([], true) |> plan() |> all() + end - assert_raise( - ArgumentError, - "locks are not supported by SQLite3", - fn -> - Schema - |> lock([p], fragment("UPDATE on ?", p)) - |> select([], true) - |> plan() - |> all() - end - ) + assert_raise ArgumentError, "locks are not supported by SQLite3", fn -> + Schema + |> lock([p], fragment("UPDATE on ?", p)) + |> select([], true) + |> plan() + |> all() + end end test "string escape" do - query = - "schema" - |> where(foo: "'\\ ") - |> select([], true) - |> plan() + query = "schema" |> where(foo: "'\\ ") |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = '''\\\\ ')} - - query = - "schema" - |> where(foo: "'") - |> select([], true) - |> plan() + assert all(query) == + ~s{SELECT 1 FROM \"schema\" AS s0 WHERE (s0.\"foo\" = '''\\\\ ')} + query = "schema" |> where(foo: 
"'") |> select([], true) |> plan() assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = '''')} end test "binary ops" do - query = - Schema - |> select([r], r.x == 2) - |> plan() - + query = Schema |> select([r], r.x == 2) |> plan() assert all(query) == ~s{SELECT s0."x" = 2 FROM "schema" AS s0} - query = - Schema - |> select([r], r.x != 2) - |> plan() - + query = Schema |> select([r], r.x != 2) |> plan() assert all(query) == ~s{SELECT s0."x" != 2 FROM "schema" AS s0} - query = - Schema - |> select([r], r.x <= 2) - |> plan() - + query = Schema |> select([r], r.x <= 2) |> plan() assert all(query) == ~s{SELECT s0."x" <= 2 FROM "schema" AS s0} - query = - Schema - |> select([r], r.x >= 2) - |> plan() - + query = Schema |> select([r], r.x >= 2) |> plan() assert all(query) == ~s{SELECT s0."x" >= 2 FROM "schema" AS s0} - query = - Schema - |> select([r], r.x < 2) - |> plan() - + query = Schema |> select([r], r.x < 2) |> plan() assert all(query) == ~s{SELECT s0."x" < 2 FROM "schema" AS s0} - query = - Schema - |> select([r], r.x > 2) - |> plan() - + query = Schema |> select([r], r.x > 2) |> plan() assert all(query) == ~s{SELECT s0."x" > 2 FROM "schema" AS s0} - query = - Schema - |> select([r], r.x + 2) - |> plan() - + query = Schema |> select([r], r.x + 2) |> plan() assert all(query) == ~s{SELECT s0."x" + 2 FROM "schema" AS s0} end test "is_nil" do - query = - Schema - |> select([r], is_nil(r.x)) - |> plan() - + query = Schema |> select([r], is_nil(r.x)) |> plan() assert all(query) == ~s{SELECT s0."x" IS NULL FROM "schema" AS s0} - query = - Schema - |> select([r], not is_nil(r.x)) - |> plan() - + query = Schema |> select([r], not is_nil(r.x)) |> plan() assert all(query) == ~s{SELECT NOT (s0."x" IS NULL) FROM "schema" AS s0} - query = - "schema" - |> select([r], r.x == is_nil(r.y)) - |> plan() - + query = "schema" |> select([r], r.x == is_nil(r.y)) |> plan() assert all(query) == ~s{SELECT s0."x" = (s0."y" IS NULL) FROM "schema" AS s0} end - test "order_by and types" do - query = - "schema3" - |> order_by([e], type(fragment("?", e.binary), ^:decimal)) - |> select(true) - |> plan() + test "fragments" do + query = Schema |> select([r], fragment("now")) |> plan() + assert all(query) == ~s{SELECT now FROM "schema" AS s0} - assert all(query) == ~s{SELECT 1 FROM "schema3" AS s0 ORDER BY (s0."binary" + 0)} - end + query = Schema |> select([r], fragment("fun(?)", r)) |> plan() + assert all(query) == ~s{SELECT fun(s0) FROM "schema" AS s0} + + query = Schema |> select([r], fragment("downcase(?)", r.x)) |> plan() + assert all(query) == ~s{SELECT downcase(s0."x") FROM "schema" AS s0} - test "fragments" do query = - Schema - |> select([r], fragment("now")) - |> plan() + Schema |> select([r], fragment("? COLLATE ?", r.x, literal(^"es_ES"))) |> plan() - assert all(query) == ~s{SELECT now FROM "schema" AS s0} + assert all(query) == ~s{SELECT s0."x" COLLATE "es_ES" FROM "schema" AS s0} - query = - Schema - |> select([r], fragment("fun(?)", r)) - |> plan() + value = 13 + query = Schema |> select([r], fragment("downcase(?, ?)", r.x, ^value)) |> plan() + assert all(query) == ~s{SELECT downcase(s0."x", ?) 
FROM "schema" AS s0} - assert all(query) == ~s{SELECT fun(s0) FROM "schema" AS s0} + query = Schema |> select([], fragment(title: 2)) |> plan() - query = - Schema - |> select([r], fragment("lcase(?)", r.x)) - |> plan() + assert_raise Ecto.QueryError, fn -> + all(query) + end + end - assert all(query) == ~s{SELECT lcase(s0."x") FROM "schema" AS s0} + test "literals" do + query = "schema" |> where(foo: true) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 1)} - query = - Schema - |> select([r], r.x) - |> where([], fragment(~s|? = "query\\?"|, ^10)) - |> plan() + query = "schema" |> where(foo: false) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 0)} - assert all(query) == ~s|SELECT s0."x" FROM "schema" AS s0 WHERE (? = "query?")| + query = "schema" |> where(foo: "abc") |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 'abc')} - value = 13 + query = "schema" |> where(foo: <<0, ?a, ?b, ?c>>) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = x'00616263')} - query = - Schema - |> select([r], fragment("lcase(?, ?)", r.x, ^value)) - |> plan() + query = "schema" |> where(foo: 123) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 123)} - assert all(query) == ~s{SELECT lcase(s0."x", ?) FROM "schema" AS s0} + query = "schema" |> where(foo: 123.0) |> select([], true) |> plan() - assert_raise( - Ecto.QueryError, - fn -> - Schema - |> select([], fragment(title: 2)) - |> plan() - |> all() - end - ) + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = CAST(123.0 AS REAL))} end - test "literals" do + test "aliasing a selected value with selected_as/2" do + query = "schema" |> select([s], selected_as(s.x, :integer)) |> plan() + assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0} + query = "schema" - |> where(foo: true) - |> select([], true) + |> select([s], s.x |> coalesce(0) |> sum() |> selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 1)} + assert all(query) == + ~s{SELECT sum(coalesce(s0."x", 0)) AS "integer" FROM "schema" AS s0} + end + test "group_by can reference the alias of a selected value with selected_as/1" do query = "schema" - |> where(foo: false) - |> select([], true) + |> select([s], selected_as(s.x, :integer)) + |> group_by(selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 0)} + assert all(query) == + ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 GROUP BY "integer"} + end + test "order_by can reference the alias of a selected value with selected_as/1" do query = "schema" - |> where(foo: "abc") - |> select([], true) + |> select([s], selected_as(s.x, :integer)) + |> order_by(selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 'abc')} + assert all(query) == + ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 ORDER BY "integer"} query = "schema" - |> where(foo: 123) - |> select([], true) + |> select([s], selected_as(s.x, :integer)) + |> order_by(desc: selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 123)} + assert all(query) == + ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 ORDER BY "integer" DESC} + end + test "datetime_add" do query = "schema" - |> where(foo: 
123.0) + |> where([s], datetime_add(s.foo, 1, "month") > s.bar) |> select([], true) |> plan() assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = CAST(123.0 AS REAL))} - - name = "y" + ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(s0."foo",'1 month') > s0."bar")} query = "schema" - |> where(fragment("? = ?", literal(^name), "Main")) + |> where([s], datetime_add(type(s.foo, :string), 1, "month") > s.bar) |> select([], true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE ("y" = 'Main')| - end - - test "selected_as" do - query = - from(s in "schema", - select: %{ - y: selected_as(s.y, :y2) - } - ) - |> plan() - - assert all(query) == ~s|SELECT s0."y" AS "y2" FROM "schema" AS s0| + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(CAST(s0."foo" AS TEXT),'1 month') > s0."bar")} end test "tagged type" do + query = Schema |> select([t], type(t.x + t.y, :integer)) |> plan() + assert all(query) == ~s{SELECT CAST(s0."x" + s0."y" AS INTEGER) FROM "schema" AS s0} + query = Schema |> select([], type(^"601d74e4-a8d3-4b6e-8365-eddb4c893327", Ecto.UUID)) |> plan() assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} - end - test "string type" do query = Schema - |> select([], type(^"test", :string)) + |> select( + [], + type(^["601d74e4-a8d3-4b6e-8365-eddb4c893327"], {:array, Ecto.UUID}) + ) |> plan() assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} @@ -994,45 +757,74 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do query = Schema |> select([s], json_extract_path(s.meta, [0, 1])) |> plan() assert all(query) == - ~s{SELECT json_extract(s0."meta", '$[0][1]') FROM "schema" AS s0} + ~s|SELECT json_extract(s0.\"meta\", '$[0][1]') FROM "schema" AS s0| query = Schema |> select([s], json_extract_path(s.meta, ["a", "b"])) |> plan() - assert all(query) == ~s{SELECT json_extract(s0."meta", '$.a.b') FROM "schema" AS s0} + + assert all(query) == + ~s|SELECT json_extract(s0.\"meta\", '$.a.b') FROM "schema" AS s0| query = Schema |> select([s], json_extract_path(s.meta, ["'a"])) |> plan() - assert all(query) == ~s{SELECT json_extract(s0."meta", '$.''a') FROM "schema" AS s0} + + assert all(query) == + ~s|SELECT json_extract(s0.\"meta\", '$.''a') FROM "schema" AS s0| query = Schema |> select([s], json_extract_path(s.meta, ["\"a"])) |> plan() assert all(query) == - ~s{SELECT json_extract(s0."meta", '$.\\"a') FROM "schema" AS s0} + ~s|SELECT json_extract(s0.\"meta\", '$.\\\"a') FROM "schema" AS s0| + end - query = Schema |> select([s], s.meta["author"]["name"]) |> plan() + test "optimized json_extract_path" do + query = Schema |> where([s], s.meta["id"] == 123) |> select(true) |> plan() assert all(query) == - ~s{SELECT json_extract(s0."meta", '$.author.name') FROM "schema" AS s0} + ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0.\"meta\", '$.id') = 123)| + + query = Schema |> where([s], s.meta["id"] == "123") |> select(true) |> plan() + + assert all(query) == + ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.id') = '123')| + + query = + Schema |> where([s], s.meta["tags"][0]["name"] == "123") |> select(true) |> plan() + + assert all(query) == + ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.tags[0].name') = '123')| + + query = Schema |> where([s], s.meta[0] == "123") |> select(true) |> plan() + + assert all(query) == + ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0.\"meta\", '$[0]') = '123')| + + query = Schema |> where([s], s.meta["enabled"] == true) |> select(true) |> 
plan() + + assert all(query) == + ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.enabled') = 1)| + + query = + Schema + |> where([s], s.meta["extra"][0]["enabled"] == false) + |> select(true) + |> plan() + + assert all(query) == + ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.extra[0].enabled') = 0)| end test "nested expressions" do z = 123 query = - (r in Schema) - |> from([]) - |> select([r], (r.x > 0 and r.y > ^(-z)) or true) - |> plan() + from(r in Schema, []) |> select([r], (r.x > 0 and r.y > ^(-z)) or true) |> plan() assert all(query) == ~s{SELECT ((s0."x" > 0) AND (s0."y" > ?)) OR 1 FROM "schema" AS s0} end test "in expression" do - query = - Schema - |> select([e], 1 in [1, e.x, 3]) - |> plan() - - assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} + query = Schema |> select([e], 1 in []) |> plan() + assert all(query) == ~s{SELECT 0 FROM "schema" AS s0} query = Schema @@ -1068,16 +860,20 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> select([e], e in [1, 2, 3]) |> plan() - assert all(query) == - ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} + assert all(query) == ~s{SELECT s0 IN (1,2,3) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in [1, e.x, 3]) |> plan() + assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in [1, ^2, 3]) |> plan() + assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM "schema" AS s0} + + query = Schema |> select([e], ^1 in [1, ^2, 3]) |> plan() + assert all(query) == ~s{SELECT ? IN (1,?,3) FROM "schema" AS s0} end test "in subquery" do - posts = - "posts" - |> where(title: ^"hello") - |> select([p], p.id) - |> subquery() + posts = subquery("posts" |> where(title: ^"hello") |> select([p], p.id)) query = "comments" @@ -1086,16 +882,15 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT c0."x" FROM "comments" AS c0 \ - WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = ?)))\ - """ + ~s{SELECT c0."x" FROM "comments" AS c0 } <> + ~s{WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = ?)))} posts = - "posts" - |> where(title: parent_as(:comment).subtitle) - |> select([p], p.id) - |> subquery() + subquery( + "posts" + |> where(title: parent_as(:comment).subtitle) + |> select([p], p.id) + ) query = "comments" @@ -1105,117 +900,79 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT c0."x" FROM "comments" AS c0 \ - WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")))\ - """ + ~s{SELECT c0."x" FROM "comments" AS c0 } <> + ~s{WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")))} end test "having" do - query = - Schema - |> having([p], p.x == p.x) - |> select([p], p.x) - |> plan() - - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 HAVING (s0."x" = s0."x")} + query = Schema |> having([p], p.x == p.x) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} query = Schema |> having([p], p.x == p.x) |> having([p], p.y == p.y) - |> select([p], [p.y, p.x]) + |> select([], true) |> plan() assert all(query) == - """ - SELECT s0."y", s0."x" \ - FROM "schema" AS s0 \ - HAVING (s0."x" = s0."x") \ - AND (s0."y" = s0."y")\ - """ + ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") AND (s0."y" = s0."y")} end test 
"or_having" do - query = - Schema - |> or_having([p], p.x == p.x) - |> select([p], p.x) - |> plan() - - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 HAVING (s0."x" = s0."x")} + query = Schema |> or_having([p], p.x == p.x) |> select([], true) |> plan() + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} query = Schema |> or_having([p], p.x == p.x) |> or_having([p], p.y == p.y) - |> select([p], [p.y, p.x]) + |> select([], true) |> plan() assert all(query) == - """ - SELECT s0."y", s0."x" \ - FROM "schema" AS s0 \ - HAVING (s0."x" = s0."x") \ - OR (s0."y" = s0."y")\ - """ + ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") OR (s0."y" = s0."y")} end test "group by" do - query = - Schema - |> group_by([r], r.x) - |> select([r], r.x) - |> plan() - + query = Schema |> group_by([r], r.x) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x"} - query = - Schema - |> group_by([r], 2) - |> select([r], r.x) - |> plan() - + query = Schema |> group_by([r], 2) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY 2} - query = - Schema - |> group_by([r], [r.x, r.y]) - |> select([r], r.x) - |> plan() - + query = Schema |> group_by([r], [r.x, r.y]) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x", s0."y"} - query = - Schema - |> group_by([r], []) - |> select([r], r.x) - |> plan() - + query = Schema |> group_by([r], []) |> select([r], r.x) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} end + test "arrays and sigils" do + query = Schema |> select([], fragment("?", [1, 2, 3])) |> plan() + assert all(query) == ~s{SELECT JSON_ARRAY('[1,2,3]') FROM "schema" AS s0} + + query = Schema |> select([], fragment("?", ~w(abc def))) |> plan() + assert all(query) == ~s{SELECT JSON_ARRAY('["abc","def"]') FROM "schema" AS s0} + + query = Schema |> where([s], s.w == []) |> select([s], s.w) |> plan() + + assert all(query) == + ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = JSON_ARRAY('[]'))} + end + test "interpolated values" do cte1 = - "schema1" - |> select([m], %{id: m.id, smth: ^true}) - |> where([], fragment("?", ^1)) - - union = - "schema1" - |> select([m], {m.id, ^true}) - |> where([], fragment("?", ^5)) + "schema1" |> select([m], %{id: m.id, smth: ^true}) |> where([], fragment("?", ^1)) - union_all = - "schema2" - |> select([m], {m.id, ^false}) - |> where([], fragment("?", ^6)) + union = "schema1" |> select([m], {m.id, ^true}) |> where([], fragment("?", ^5)) + union_all = "schema2" |> select([m], {m.id, ^false}) |> where([], fragment("?", ^6)) query = - Schema + "schema" |> with_cte("cte1", as: ^cte1) |> with_cte("cte2", as: fragment("SELECT * FROM schema WHERE ?", ^2)) - |> select([m], {m.id, ^0}) + |> select([m], {m.id, ^true}) |> join(:inner, [], Schema2, on: fragment("?", ^true)) |> join(:inner, [], Schema2, on: fragment("?", ^false)) |> where([], fragment("?", ^true)) @@ -1231,80 +988,107 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> offset([], ^9) |> plan() + result = + "WITH \"cte1\" AS (SELECT ss0.\"id\" AS \"id\", ? AS \"smth\" FROM \"schema1\" AS ss0 WHERE (?)), " <> + "\"cte2\" AS (SELECT * FROM schema WHERE ?) " <> + "SELECT s0.\"id\", ? FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON ? " <> + "INNER JOIN \"schema2\" AS s2 ON ? WHERE (?) AND (?) " <> + "GROUP BY ?, ? HAVING (?) AND (?) " <> + "UNION (SELECT s0.\"id\", ? 
FROM \"schema1\" AS s0 WHERE (?)) " <> + "UNION ALL (SELECT s0.\"id\", ? FROM \"schema2\" AS s0 WHERE (?)) " <> + "ORDER BY ? LIMIT ? OFFSET ?" + + assert all(query) == String.trim(result) + end + + test "order_by and types" do + query = + "schema3" + |> order_by([e], type(fragment("?", e.binary), ^:decimal)) + |> select(true) + |> plan() + assert all(query) == - """ - WITH "cte1" AS (SELECT ss0."id" AS "id", ? AS "smth" FROM "schema1" AS ss0 WHERE (?)), \ - "cte2" AS (SELECT * FROM schema WHERE ?) \ - SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON ? \ - INNER JOIN "schema2" AS s2 ON ? WHERE (?) AND (?) \ - GROUP BY ?, ? HAVING (?) AND (?) \ - UNION SELECT s0."id", ? FROM "schema1" AS s0 WHERE (?) \ - UNION ALL SELECT s0."id", ? FROM "schema2" AS s0 WHERE (?) \ - ORDER BY ? LIMIT ? OFFSET ?\ - """ + "SELECT 1 FROM \"schema3\" AS s0 ORDER BY CAST(s0.\"binary\" AS REAL)" + end + + test "fragments and types" do + query = + plan( + from(e in "schema", + where: + fragment( + "extract(? from ?) = ?", + ^"month", + e.start_time, + type(^"4", :integer) + ), + where: + fragment( + "extract(? from ?) = ?", + ^"year", + e.start_time, + type(^"2015", :integer) + ), + select: true + ) + ) + + result = + "SELECT 1 FROM \"schema\" AS s0 " <> + "WHERE (extract(? from s0.\"start_time\") = CAST(? AS INTEGER)) " <> + "AND (extract(? from s0.\"start_time\") = CAST(? AS INTEGER))" + + assert all(query) == String.trim(result) end test "fragments allow ? to be escaped with backslash" do query = - (e in "schema") - |> from( - where: fragment(~s|? = "query\\?"|, e.start_time), - select: true + plan( + from(e in "schema", + where: fragment("? = \"query\\?\"", e.start_time), + select: true + ) ) - |> plan() - result = ~s|SELECT 1 FROM "schema" AS s0 WHERE (s0."start_time" = "query?")| + result = + "SELECT 1 FROM \"schema\" AS s0 " <> + "WHERE (s0.\"start_time\" = \"query?\")" + + assert all(query) == String.trim(result) + end + + test "build_explain_query" do + assert SQL.build_explain_query("SELECT 1", :query_plan) == + "EXPLAIN QUERY PLAN SELECT 1" - assert all(query) == result + assert SQL.build_explain_query("SELECT 1", :instructions) == "EXPLAIN SELECT 1" end - ## ## *_all - ## test "update all" do - query = - (m in Schema) - |> from(update: [set: [x: 0]]) - |> plan(:update_all) + query = from(m in Schema, update: [set: [x: 0]]) |> plan(:update_all) - assert update_all(query) == ~s{UPDATE "schema" AS s0 SET "x" = 0} + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0} query = - (m in Schema) - |> from(update: [set: [x: 0], inc: [y: 1, z: -3]]) - |> plan(:update_all) + from(m in Schema, update: [set: [x: 0], inc: [y: 1, z: -3]]) |> plan(:update_all) - # TODO: should probably be "y = s0."y" + 1" - # table-name.column-name is not allowed on the left hand side of SET - # but is allowed on right hand side, and we should err towards being more explicit assert update_all(query) == - """ - UPDATE "schema" AS s0 \ - SET \ - "x" = 0, \ - "y" = "y" + 1, \ - "z" = "z" + -3\ - """ + ~s{UPDATE "schema" AS s0 SET "x" = 0, "y" = "y" + 1, "z" = "z" + -3} query = - (e in Schema) - |> from(where: e.x == 123, update: [set: [x: 0]]) - |> plan(:update_all) + from(e in Schema, where: e.x == 123, update: [set: [x: 0]]) |> plan(:update_all) assert update_all(query) == - """ - UPDATE "schema" AS s0 \ - SET "x" = 0 \ - WHERE (s0."x" = 123)\ - """ + ~s{UPDATE "schema" AS s0 SET "x" = 0 WHERE (s0."x" = 123)} - query = - (m in Schema) - |> from(update: [set: [x: ^0]]) - |> plan(:update_all) + query = 
from(m in Schema, update: [set: [x: ^0]]) |> plan(:update_all) - assert update_all(query) == ~s|UPDATE "schema" AS s0 SET "x" = ?| + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = ?} query = Schema @@ -1313,17 +1097,10 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan(:update_all) assert update_all(query) == - """ - UPDATE "schema" AS s0 \ - SET \ - "x" = 0 \ - FROM "schema2" AS s1 \ - WHERE (s0."x" = s1."z")\ - """ + ~s{UPDATE "schema" AS s0 SET "x" = 0 FROM "schema2" AS s1 WHERE (s0."x" = s1."z")} query = - (e in Schema) - |> from( + from(e in Schema, where: e.x == 123, update: [set: [x: 0]], join: q in Schema2, @@ -1332,150 +1109,154 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan(:update_all) assert update_all(query) == - """ - UPDATE "schema" AS s0 \ - SET "x" = 0 \ - FROM "schema2" AS s1 \ - WHERE (s0."x" = s1."z") \ - AND (s0."x" = 123)\ - """ + ~s{UPDATE "schema" AS s0 SET "x" = 0 FROM "schema2" AS s1 } <> + ~s{WHERE (s0."x" = s1."z") AND (s0."x" = 123)} + end + test "update all with returning" do query = - from( - p in Post, - where: p.title == ^"foo", - select: p.content, - update: [set: [title: "bar"]] - ) + from(m in Schema, update: [set: [x: 0]]) |> select([m], m) |> plan(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "x" = 0 RETURNING "id", "x", "y", "z", "w", "meta"} + + query = + from(m in Schema, update: [set: [x: ^1]]) + |> where([m], m.x == ^2) + |> select([m], m.x == ^3) |> plan(:update_all) assert update_all(query) == - """ - UPDATE "posts" AS p0 \ - SET "title" = 'bar' \ - WHERE (p0."title" = ?) \ - RETURNING "content"\ - """ + ~s{UPDATE "schema" AS s0 SET "x" = ? WHERE (s0."x" = ?) RETURNING "x" = ?} end - test "update all with prefix" do + test "update all array ops" do + query = from(m in Schema, update: [push: [w: 0]]) |> plan(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" AS s0 SET "w" = JSON_INSERT("w",'$[#]',0)} + + query = from(m in Schema, update: [pull: [w: 0]]) |> plan(:update_all) + + assert_raise Ecto.QueryError, fn -> + update_all(query) + end + end + + test "update all with subquery" do + sub = from(p in Schema, where: p.x > ^10) + query = - (m in Schema) - |> from(update: [set: [x: 0]]) - |> Map.put(:prefix, "prefix") - |> plan(:update_all) + Schema + |> join(:inner, [p], p2 in subquery(sub), on: p.id == p2.id) + |> update([_], set: [x: ^100]) + + {planned_query, cast_params, dump_params} = + Ecto.Adapter.Queryable.plan_query(:update_all, Ecto.Adapters.SQLite3, query) - assert update_all(query) == ~s{UPDATE "prefix"."schema" AS s0 SET "x" = 0} + assert update_all(planned_query) == + ~s{UPDATE "schema" AS s0 SET "x" = ? 
FROM } <> + ~s{(SELECT ss0."id" AS "id", ss0."x" AS "x", ss0."y" AS "y", ss0."z" AS "z", ss0."w" AS "w", ss0."meta" AS "meta" FROM "schema" AS ss0 WHERE (ss0."x" > ?)) } <> + ~s{AS s1 WHERE (s0."id" = s1."id")} + assert cast_params == [100, 10] + assert dump_params == [100, 10] + end + + test "update all with prefix" do query = - (m in Schema) - |> from(prefix: "first", update: [set: [x: 0]]) + from(m in Schema, update: [set: [x: 0]]) |> Map.put(:prefix, "prefix") |> plan(:update_all) - assert update_all(query) == ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} - end + assert update_all(query) == + ~s{UPDATE "prefix"."schema" AS s0 SET "x" = 0} - test "update all with returning" do query = - from(p in Post, update: [set: [title: "foo"]]) - |> select([p], p) + from(m in Schema, prefix: "first", update: [set: [x: 0]]) + |> Map.put(:prefix, "prefix") |> plan(:update_all) assert update_all(query) == - """ - UPDATE "posts" AS p0 \ - SET "title" = 'foo' \ - RETURNING "id", "title", "content"\ - """ + ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} + end + # TODO this is broken? + test "update all with left join" do query = - from(m in Schema, update: [set: [x: ^1]]) - |> where([m], m.x == ^2) - |> select([m], m.x == ^3) + from(m in Schema, + join: x in assoc(m, :comments), + left_join: p in assoc(m, :permalink), + update: [set: [w: m.list2]] + ) |> plan(:update_all) assert update_all(query) == - """ - UPDATE "schema" AS s0 \ - SET "x" = ? \ - WHERE (s0."x" = ?) \ - RETURNING "x" = ?\ - """ + ~s{UPDATE "schema" AS s0 SET "w" = s0."list2" FROM "schema2" AS s1 LEFT OUTER JOIN "schema3" AS s2 ON s2."id" = s0."y" WHERE (s1."z" = s0."x")} end test "delete all" do - query = - Schema - |> Ecto.Queryable.to_query() - |> plan() - + query = Schema |> Queryable.to_query() |> plan() assert delete_all(query) == ~s{DELETE FROM "schema" AS s0} + query = from(e in Schema, where: e.x == 123) |> plan() + + assert delete_all(query) == + ~s{DELETE FROM "schema" AS s0 WHERE (s0."x" = 123)} + + query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> plan() + + assert_raise ArgumentError, fn -> + delete_all(query) + end + query = - (e in Schema) - |> from(where: e.x == 123) - |> plan() + from(e in Schema, where: e.x == 123, join: q in Schema2, on: e.x == q.z) |> plan() - assert delete_all(query) == ~s{DELETE FROM "schema" AS s0 WHERE (s0."x" = 123)} + assert_raise ArgumentError, fn -> + delete_all(query) + end query = - (e in Schema) - |> from(where: e.x == 123, select: e.x) + from(e in Schema, + where: e.x == 123, + join: assoc(e, :comments), + join: assoc(e, :permalink) + ) |> plan() - assert delete_all(query) == - """ - DELETE FROM "schema" AS s0 \ - WHERE (s0."x" = 123) RETURNING "x"\ - """ + assert_raise ArgumentError, fn -> + delete_all(query) + end end test "delete all with returning" do - query = Post |> Ecto.Queryable.to_query() |> select([m], m) |> plan() + query = Schema |> Queryable.to_query() |> select([m], m) |> plan() assert delete_all(query) == - """ - DELETE FROM "posts" AS p0 \ - RETURNING "id", "title", "content"\ - """ + ~s{DELETE FROM "schema" AS s0 RETURNING "id", "x", "y", "z", "w", "meta"} end test "delete all with prefix" do - query = - Schema - |> Ecto.Queryable.to_query() - |> Map.put(:prefix, "prefix") - |> plan() - + query = Schema |> Queryable.to_query() |> Map.put(:prefix, "prefix") |> plan() assert delete_all(query) == ~s{DELETE FROM "prefix"."schema" AS s0} - query = - Schema - |> from(prefix: "first") - |> Map.put(:prefix, "prefix") - |> plan() - + query = Schema |> 
from(prefix: "first") |> Map.put(:prefix, "prefix") |> plan() assert delete_all(query) == ~s{DELETE FROM "first"."schema" AS s0} end - ## ## Partitions and windows - ## - describe "windows" do + describe "windows and partitions" do test "one window" do query = Schema |> select([r], r.x) |> windows([r], w: [partition_by: r.x]) - |> plan() + |> plan assert all(query) == - """ - SELECT s0."x" \ - FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")\ - """ + ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")} end test "two windows" do @@ -1486,11 +1267,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s0."x" \ - FROM "schema" AS s0 WINDOW "w1" AS (PARTITION BY s0."x"), \ - "w2" AS (PARTITION BY s0."y")\ - """ + ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w1" AS (PARTITION BY s0."x"), "w2" AS (PARTITION BY s0."y")} end test "count over window" do @@ -1501,10 +1278,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT count(s0."x") OVER "w" \ - FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")\ - """ + ~s{SELECT count(s0."x") OVER "w" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")} end test "count over all" do @@ -1531,11 +1305,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> select([r], nth_value(r.x, 42) |> over) |> plan() - assert all(query) == - """ - SELECT nth_value(s0."x", 42) OVER () \ - FROM "schema" AS s0\ - """ + assert all(query) == ~s{SELECT nth_value(s0."x", 42) OVER () FROM "schema" AS s0} end test "lag/2 over all" do @@ -1554,10 +1324,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT custom_function(s0."x") OVER () \ - FROM "schema" AS s0\ - """ + ~s{SELECT custom_function(s0."x") OVER () FROM "schema" AS s0} end test "partition by and order by on window" do @@ -1568,23 +1335,18 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s0."x" \ - FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x", s0."z" ORDER BY s0."x")\ - """ + ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x", s0."z" ORDER BY s0."x")} end - test "partition by and order by on over" do + test "partition by ond order by over" do query = Schema |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x)) - |> plan() + + query = query |> plan() assert all(query) == - """ - SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x") \ - FROM "schema" AS s0\ - """ + ~s{SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x") FROM "schema" AS s0} end test "frame clause" do @@ -1599,24 +1361,15 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do frame: fragment("ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING") ) ) - |> plan() + + query = query |> plan() assert all(query) == - """ - SELECT count(s0."x") OVER (\ - PARTITION BY s0."x", \ - s0."z" \ - ORDER BY s0."x" \ - ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING\ - ) \ - FROM "schema" AS s0\ - """ + ~s{SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) FROM "schema" AS s0} end end - ## ## Joins - ## test "join" do query = @@ -1626,11 +1379,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "schema" AS s0 \ - INNER JOIN "schema2" AS s1 ON s0."x" = s1."z"\ - """ + ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z"} query = Schema 
@@ -1640,25 +1389,18 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z" \ - INNER JOIN "schema" AS s2 ON 1\ - """ + ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z" } <> + ~s{INNER JOIN "schema" AS s2 ON 1} end - test "join hints" do - query = + test "join with hints" do + assert_raise Ecto.QueryError, ~r/join hints are not supported by SQLite3/, fn -> Schema - |> join(:inner, [p], q in Schema2, hints: ["INDEXED BY FOO", "INDEXED BY BAR"]) + |> join(:inner, [p], q in Schema2, hints: ["USE INDEX FOO", "USE INDEX BAR"]) |> select([], true) |> plan() - - assert all(query) == - """ - SELECT 1 \ - FROM "schema" AS s0 \ - INNER JOIN "schema2" AS s1 INDEXED BY FOO INDEXED BY BAR ON 1\ - """ + |> all() + end end test "join with nothing bound" do @@ -1669,11 +1411,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "schema" AS s0 \ - INNER JOIN "schema2" AS s1 ON s1."z" = s1."z"\ - """ + ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s1."z" = s1."z"} end test "join without schema" do @@ -1684,19 +1422,12 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "posts" AS p0 \ - INNER JOIN "comments" AS c1 ON p0."x" = c1."z"\ - """ + ~s{SELECT 1 FROM "posts" AS p0 INNER JOIN "comments" AS c1 ON p0."x" = c1."z"} end test "join with subquery" do posts = - "posts" - |> where(title: ^"hello") - |> select([r], %{x: r.x, y: r.y}) - |> subquery() + subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, y: r.y})) query = "comments" @@ -1705,20 +1436,11 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s1."x" FROM "comments" AS c0 \ - INNER JOIN (\ - SELECT sp0."x" AS "x", sp0."y" AS "y" \ - FROM "posts" AS sp0 \ - WHERE (sp0."title" = ?)\ - ) AS s1 ON 1\ - """ + ~s{SELECT s1."x" FROM "comments" AS c0 } <> + ~s{INNER JOIN (SELECT sp0."x" AS "x", sp0."y" AS "y" FROM "posts" AS sp0 WHERE (sp0."title" = ?)) AS s1 ON 1} posts = - "posts" - |> where(title: ^"hello") - |> select([r], %{x: r.x, z: r.y}) - |> subquery() + subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, z: r.y})) query = "comments" @@ -1727,20 +1449,15 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s1."x", s1."z" FROM "comments" AS c0 \ - INNER JOIN (\ - SELECT sp0."x" AS "x", sp0."y" AS "z" \ - FROM "posts" AS sp0 \ - WHERE (sp0."title" = ?)\ - ) AS s1 ON 1\ - """ + ~s{SELECT s1."x", s1."z" FROM "comments" AS c0 } <> + ~s{INNER JOIN (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0 WHERE (sp0."title" = ?)) AS s1 ON 1} posts = - "posts" - |> where(title: parent_as(:comment).subtitle) - |> select([r], r.title) - |> subquery() + subquery( + "posts" + |> where(title: parent_as(:comment).subtitle) + |> select([r], r.title) + ) query = "comments" @@ -1750,15 +1467,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s1."title" \ - FROM "comments" AS c0 \ - INNER JOIN (\ - SELECT sp0."title" AS "title" \ - FROM "posts" AS sp0 \ - WHERE (sp0."title" = c0."subtitle")\ - ) AS s1 ON 1\ - """ + ~s{SELECT s1."title" FROM "comments" AS c0 } <> + ~s{INNER JOIN (SELECT sp0."title" AS "title" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")) AS s1 ON 1} end test "join with prefix" do @@ -1770,11 +1480,7 
@@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "prefix"."schema" AS s0 \ - INNER JOIN "prefix"."schema2" AS s1 ON s0."x" = s1."z"\ - """ + ~s{SELECT 1 FROM "prefix"."schema" AS s0 INNER JOIN "prefix"."schema2" AS s1 ON s0."x" = s1."z"} query = Schema @@ -1785,11 +1491,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "first"."schema" AS s0 \ - INNER JOIN "second"."schema2" AS s1 ON s0."x" = s1."z"\ - """ + ~s{SELECT 1 FROM "first"."schema" AS s0 INNER JOIN "second"."schema2" AS s1 ON s0."x" = s1."z"} end test "join with fragment" do @@ -1809,17 +1511,9 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s0."id", ? \ - FROM "schema" AS s0 \ - INNER JOIN \ - (\ - SELECT * \ - FROM schema2 AS s2 \ - WHERE s2.id = s0."x" AND s2.field = ?\ - ) AS f1 ON 1 \ - WHERE ((s0."id" > 0) AND (s0."id" < ?))\ - """ + ~s{SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT * FROM schema2 AS s2 WHERE s2.id = s0."x" AND s2.field = ?) AS f1 ON 1 } <> + ~s{WHERE ((s0."id" > 0) AND (s0."id" < ?))} end test "join with fragment and on defined" do @@ -1830,42 +1524,74 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT s0."id", ? \ - FROM "schema" AS s0 \ - INNER JOIN \ - (SELECT * FROM schema2) AS f1 ON f1."id" = s0."id"\ - """ + ~s{SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT * FROM schema2) AS f1 ON f1."id" = s0."id"} end test "join with query interpolation" do inner = Ecto.Queryable.to_query(Schema2) + query = from(p in Schema, left_join: c in ^inner, select: {p.id, c.id}) |> plan() + assert all(query) == + "SELECT s0.\"id\", s1.\"id\" FROM \"schema\" AS s0 LEFT OUTER JOIN \"schema2\" AS s1 ON 1" + end + + test "lateral join with fragment" do query = - (p in Schema) - |> from(left_join: c in ^inner, select: {p.id, c.id}) + Schema + |> join( + :inner_lateral, + [p], + q in fragment( + "SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?", + p.x, + ^10 + ) + ) + |> select([p, q], {p.id, q.z}) + |> where([p], p.id > 0 and p.id < ^100) |> plan() - assert all(query) == - """ - SELECT s0."id", s1."id" \ - FROM "schema" AS s0 \ - LEFT OUTER JOIN "schema2" AS s1 ON 1\ - """ + assert_raise Ecto.QueryError, fn -> + all(query) + end + end + + test "cross lateral join with fragment" do + query = + Schema + |> join( + :cross_lateral, + [p], + q in fragment( + "SELECT * FROM schema2 AS s2 WHERE s2.id = ? 
AND s2.field = ?", + p.x, + ^10 + ) + ) + |> select([p, q], {p.id, q.z}) + |> where([p], p.id > 0 and p.id < ^100) + |> plan() + + assert_raise Ecto.QueryError, fn -> + all(query) + end end test "cross join" do + query = from(p in Schema, cross_join: c in Schema2, select: {p.id, c.id}) |> plan() + + assert all(query) == + "SELECT s0.\"id\", s1.\"id\" FROM \"schema\" AS s0 CROSS JOIN \"schema2\" AS s1" + end + + test "cross join with fragment" do query = - (p in Schema) - |> from(cross_join: c in Schema2, select: {p.id, c.id}) + from(p in Schema, cross_join: fragment("json_each(?)", p.j), select: {p.id}) |> plan() assert all(query) == - """ - SELECT s0."id", s1."id" \ - FROM "schema" AS s0 \ - CROSS JOIN "schema2" AS s1\ - """ + ~s{SELECT s0."id" FROM "schema" AS s0 CROSS JOIN json_each(s0."j") AS f1} end test "join produces correct bindings" do @@ -1874,49 +1600,28 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do query = plan(query) assert all(query) == - """ - SELECT s0."id", s2."id" \ - FROM "schema" AS s0 \ - INNER JOIN "schema2" AS s1 ON 1 \ - INNER JOIN "schema2" AS s2 ON 1\ - """ + "SELECT s0.\"id\", s2.\"id\" FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON 1 INNER JOIN \"schema2\" AS s2 ON 1" end describe "query interpolation parameters" do test "self join on subquery" do subquery = select(Schema, [r], %{x: r.x, y: r.y}) + query = subquery |> join(:inner, [c], p in subquery(subquery), on: true) |> plan() - query = - subquery - |> join(:inner, [c], p in subquery(subquery)) - |> plan() - |> all() - - assert query == - """ - SELECT s0."x", s0."y" \ - FROM "schema" AS s0 \ - INNER JOIN (SELECT ss0."x" AS "x", ss0."y" AS "y" FROM "schema" AS ss0) \ - AS s1 ON 1\ - """ + assert all(query) == + ~s{SELECT s0."x", s0."y" FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT ss0."x" AS "x", ss0."y" AS "y" FROM "schema" AS ss0) } <> + ~s{AS s1 ON 1} end test "self join on subquery with fragment" do subquery = select(Schema, [r], %{string: fragment("downcase(?)", ^"string")}) + query = subquery |> join(:inner, [c], p in subquery(subquery), on: true) |> plan() - query = - subquery - |> join(:inner, [c], p in subquery(subquery)) - |> plan() - |> all() - - assert query == - """ - SELECT downcase(?) \ - FROM "schema" AS s0 \ - INNER JOIN (SELECT downcase(?) AS "string" FROM "schema" AS ss0) \ - AS s1 ON 1\ - """ + assert all(query) == + ~s{SELECT downcase(?) FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT downcase(?) AS "string" FROM "schema" AS ss0) } <> + ~s{AS s1 ON 1} end test "join on subquery with simple select" do @@ -1925,38 +1630,25 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do query = Schema |> select([r], %{y: ^666}) - |> join(:inner, [c], p in subquery(subquery)) + |> join(:inner, [c], p in subquery(subquery), on: true) |> where([a, b], a.x == ^111) |> plan() - |> all() - - assert query == - """ - SELECT ? \ - FROM "schema" AS s0 \ - INNER JOIN (SELECT ? AS "x", ? AS "w" FROM "schema" AS ss0) AS s1 ON 1 \ - WHERE (s0."x" = ?)\ - """ + + assert all(query) == + ~s{SELECT ? FROM "schema" AS s0 INNER JOIN } <> + ~s{(SELECT ? AS "x", ? 
AS "w" FROM "schema" AS ss0) AS s1 ON 1 } <> + ~s{WHERE (s0."x" = ?)} end end - ## ## Associations - ## test "association join belongs_to" do query = - Schema2 - |> join(:inner, [c], p in assoc(c, :post)) - |> select([], true) - |> plan() + Schema2 |> join(:inner, [c], p in assoc(c, :post)) |> select([], true) |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "schema2" AS s0 \ - INNER JOIN "schema" AS s1 ON s1."x" = s0."z"\ - """ + "SELECT 1 FROM \"schema2\" AS s0 INNER JOIN \"schema\" AS s1 ON s1.\"x\" = s0.\"z\"" end test "association join has_many" do @@ -1967,11 +1659,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "schema" AS s0 \ - INNER JOIN "schema2" AS s1 ON s1."z" = s0."x"\ - """ + "SELECT 1 FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON s1.\"z\" = s0.\"x\"" end test "association join has_one" do @@ -1982,108 +1670,80 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - """ - SELECT 1 \ - FROM "schema" AS s0 \ - INNER JOIN "schema3" AS s1 ON s1."id" = s0."y"\ - """ + "SELECT 1 FROM \"schema\" AS s0 INNER JOIN \"schema3\" AS s1 ON s1.\"id\" = s0.\"y\"" end - ## - ## Schema based - ## + # Schema based test "insert" do - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, []) - assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?)} - - assert_raise( - ArgumentError, - "Cell-wise default values are not supported on INSERT statements by SQLite3", - fn -> - insert( - nil, - "schema", - [:x, :y], - [[:x, :y], [nil, :z]], - {:raise, [], []}, - [] - ) - end - ) + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [:id]) + assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) RETURNING "id"} + + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id]) + end + + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id], [ + 1, + 2 + ]) + end + + query = insert(nil, "schema", [], [[]], {:raise, [], []}, [:id]) + assert query == ~s{INSERT INTO "schema" DEFAULT VALUES RETURNING "id"} query = insert(nil, "schema", [], [[]], {:raise, [], []}, []) assert query == ~s{INSERT INTO "schema" DEFAULT VALUES} query = insert("prefix", "schema", [], [[]], {:raise, [], []}, []) assert query == ~s{INSERT INTO "prefix"."schema" DEFAULT VALUES} - - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [:id]) - assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) RETURNING "id"} - - assert_raise( - ArgumentError, - "Cell-wise default values are not supported on INSERT statements by SQLite3", - fn -> - insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id]) - end - ) end test "insert with on conflict" do - # These tests are adapted from the Postgres Adaptor - # For :nothing query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], []}, []) assert query == - """ - INSERT INTO "schema" ("x","y") \ - VALUES (?,?) \ - ON CONFLICT DO NOTHING\ - """ + ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT DO NOTHING} query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], [:x, :y]}, []) assert query == - """ - INSERT INTO "schema" ("x","y") \ - VALUES (?,?) \ - ON CONFLICT ("x","y") DO NOTHING\ - """ + ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) 
ON CONFLICT ("x","y") DO NOTHING} - # For :replace_all - assert_raise( - ArgumentError, - "Upsert in SQLite3 requires :conflict_target", - fn -> - conflict_target = [] - - insert( - nil, - "schema", - [:x, :y], - [[:x, :y]], - {:replace_all, [], conflict_target}, - [] - ) - end - ) + # For :update + update = from("schema", update: [set: [z: "foo"]]) |> plan(:update_all) + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z]) - assert_raise( - ArgumentError, - "Upsert in SQLite3 does not support ON CONSTRAINT", - fn -> - insert( - nil, - "schema", - [:x, :y], - [[:x, :y]], - {:replace_all, [], {:constraint, :foo}}, - [] - ) - end - ) + assert query == + ~s{INSERT INTO "schema" AS s0 ("x","y") VALUES (?,?) ON CONFLICT ("x","y") DO UPDATE SET "z" = 'foo' RETURNING "z"} + + assert_raise ArgumentError, "Upsert in SQLite3 requires :conflict_target", fn -> + conflict_target = [] + + insert( + nil, + "schema", + [:x, :y], + [[:x, :y]], + {:replace_all, [], conflict_target}, + [] + ) + end + + assert_raise ArgumentError, + "Upsert in SQLite3 does not support ON CONSTRAINT", + fn -> + insert( + nil, + "schema", + [:x, :y], + [[:x, :y]], + {:replace_all, [], {:constraint, :foo}}, + [] + ) + end query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], [:id]}, []) @@ -2097,69 +1757,73 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end test "insert with query" do - select_query = from("schema", select: [:id]) |> plan(:all) - - assert_raise( - ArgumentError, - "Cell-wise default values are not supported on INSERT statements by SQLite3", - fn -> - insert( - nil, - "schema", - [:x, :y, :z], - [[:x, {select_query, 2}, :z], [nil, nil, {select_query, 1}]], - {:raise, [], []}, - [] - ) - end - ) + query = from("schema", select: [:id]) |> plan(:all) + + assert_raise ArgumentError, fn -> + insert( + nil, + "schema", + [:x, :y, :z], + [[:x, {query, 3}, :z], [nil, {query, 2}, :z]], + {:raise, [], []}, + [:id] + ) + end end test "insert with query as rows" do + query = from(s in "schema", select: %{foo: fragment("3"), bar: s.bar}) |> plan(:all) + + assert_raise ArgumentError, fn -> + insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [:foo]) + end + query = from(s in "schema", select: %{foo: fragment("3"), bar: s.bar}, where: true) |> plan(:all) - query = insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, []) + query = insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [:foo]) assert query == - ~s{INSERT INTO "schema" ("foo","bar") SELECT 3, s0."bar" FROM "schema" AS s0 WHERE (1)} + ~s{INSERT INTO "schema" ("foo","bar") SELECT 3, s0."bar" FROM "schema" AS s0 WHERE (1) RETURNING "foo"} + end - assert_raise(ArgumentError, fn -> - query = - (s in "schema") - |> from(select: %{foo: fragment("3"), bar: s.bar}) - |> plan(:all) + test "update" do + query = update(nil, "schema", [:x, :y], [id: 1], []) + assert query == ~s{UPDATE "schema" SET "x" = ?, "y" = ? WHERE "id" = ?} - insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, []) - end) - end + query = update(nil, "schema", [:x, :y], [id: 1], [:z]) - # test "update" do - # query = update(nil, "schema", [:x, :y], [:id], []) - # assert query == ~s{UPDATE schema SET x = ?, y = ? WHERE id = ?} - # - # query = update(nil, "schema", [:x, :y], [:id], []) - # assert query == ~s{UPDATE schema SET x = ?, y = ? WHERE id = ?} - # - # query = update("prefix", "schema", [:x, :y], [:id], []) - # assert query == ~s{UPDATE prefix.schema SET x = ?, y = ? 
WHERE id = ?} - # end + assert query == + ~s{UPDATE "schema" SET "x" = ?, "y" = ? WHERE "id" = ? RETURNING "z"} + + query = update("prefix", "schema", [:x, :y], [id: 1], []) + assert query == ~s{UPDATE "prefix"."schema" SET "x" = ?, "y" = ? WHERE "id" = ?} + + query = update("prefix", "schema", [:x, :y], [id: 1, updated_at: nil], []) + + assert query == + ~s{UPDATE "prefix"."schema" SET "x" = ?, "y" = ? WHERE "id" = ? AND "updated_at" IS NULL} + end test "delete" do query = delete(nil, "schema", [x: 1, y: 2], []) assert query == ~s{DELETE FROM "schema" WHERE "x" = ? AND "y" = ?} + query = delete(nil, "schema", [x: 1, y: 2], [:z]) + assert query == ~s{DELETE FROM "schema" WHERE "x" = ? AND "y" = ? RETURNING "z"} + query = delete("prefix", "schema", [x: 1, y: 2], []) assert query == ~s{DELETE FROM "prefix"."schema" WHERE "x" = ? AND "y" = ?} - query = delete(nil, "schema", [x: nil, y: 1], []) - assert query == ~s{DELETE FROM "schema" WHERE "x" IS NULL AND "y" = ?} + query = delete("prefix", "schema", [x: nil, y: 1], []) + assert query == ~s{DELETE FROM "prefix"."schema" WHERE "x" IS NULL AND "y" = ?} end - ## - ## DDL - ## + # DDL + + alias Ecto.Migration.Reference + import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, constraint: 3] test "executing a string during migration" do assert execute_ddl("example") == ["example"] @@ -2170,40 +1834,29 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do {:create, table(:posts), [ {:add, :name, :string, [default: "Untitled", size: 20, null: false]}, - {:add, :token, :binary, [size: 20, null: false]}, {:add, :price, :numeric, [precision: 8, scale: 2, default: {:fragment, "expr"}]}, {:add, :on_hand, :integer, [default: 0, null: true]}, - {:add, :likes, :integer, [default: 0, null: false]}, - {:add, :published_at, :utc_datetime, [null: true]}, {:add, :is_active, :boolean, [default: true]}, - {:add, :notes, :text, [collate: :nocase]}, - {:add, :meta, :text, [check: %{name: "meta_constraint", expr: "meta != 'a'"}]} + {:add, :tags, {:array, :string}, [default: []]}, + {:add, :languages, {:array, :string}, [default: ["pt", "es"]]}, + {:add, :limits, {:array, :integer}, [default: [100, 30_000]]} ]} assert execute_ddl(create) == [ """ - CREATE TABLE "posts" (\ - "name" TEXT DEFAULT 'Untitled' NOT NULL, \ - "token" BLOB NOT NULL, \ - "price" NUMERIC DEFAULT expr, \ - "on_hand" INTEGER DEFAULT 0 NULL, \ - "likes" INTEGER DEFAULT 0 NOT NULL, \ - "published_at" TEXT NULL, \ - "is_active" INTEGER DEFAULT true, \ - "notes" TEXT COLLATE NOCASE, \ - "meta" TEXT CONSTRAINT meta_constraint CHECK (meta != 'a')\ - )\ + CREATE TABLE "posts" ("name" TEXT DEFAULT 'Untitled' NOT NULL, + "price" NUMERIC DEFAULT expr, + "on_hand" INTEGER DEFAULT 0 NULL, + "is_active" INTEGER DEFAULT true, + "tags" TEXT DEFAULT ('[]'), + "languages" TEXT DEFAULT ('["pt","es"]'), + "limits" TEXT DEFAULT ('[100,30000]')) """ + |> remove_newlines ] end - test "create empty table" do - create = {:create, table(:posts), []} - - assert execute_ddl(create) == [~s{CREATE TABLE "posts" ()}] - end - test "create table with prefix" do create = {:create, table(:posts, prefix: :foo), @@ -2211,10 +1864,43 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert execute_ddl(create) == [ """ - CREATE TABLE "foo"."posts" (\ - "category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id")\ - )\ + CREATE TABLE "foo"."posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id")) """ + |> remove_newlines + ] + end + + test 
"raise on table comment" do + create = + {:create, table(:posts, comment: "comment"), + [ + {:add, :category_0, %Reference{table: :categories}, + [comment: "column comment"]}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, [comment: "column comment 2"]} + ]} + + assert_raise ArgumentError, ~r/comment/, fn -> + execute_ddl(create) + end + end + + # TODO should we raise on comment? + test "create table with comment on columns" do + create = + {:create, table(:posts), + [ + {:add, :category_0, %Reference{table: :categories}, []}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, []} + ]} + + assert execute_ddl(create) == [ + remove_newlines(""" + CREATE TABLE "posts" + ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), "created_at" TEXT, "updated_at" TEXT) + """) ] end @@ -2230,31 +1916,40 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do [null: false]}, {:add, :category_4, %Reference{table: :categories, on_delete: :nilify_all}, []}, - {:add, :category_5, - %Reference{table: :categories, prefix: :foo, on_delete: :nilify_all}, []}, - {:add, :category_6, - %Reference{table: :categories, with: [here: :there], on_delete: :nilify_all}, + {:add, :category_5, %Reference{table: :categories, on_update: :nothing}, []}, + {:add, :category_6, %Reference{table: :categories, on_update: :update_all}, + [null: false]}, + {:add, :category_7, %Reference{table: :categories, on_update: :nilify_all}, []}, - {:add, :category_7, - %Reference{table: :tags, with: [that: :this], on_delete: :nilify_all}, []} + {:add, :category_8, + %Reference{ + table: :categories, + on_delete: :nilify_all, + on_update: :update_all + }, [null: false]}, + {:add, :category_9, %Reference{table: :categories, on_delete: :restrict}, []}, + {:add, :category_10, %Reference{table: :categories, on_update: :restrict}, []}, + {:add, :category_11, + %Reference{table: :categories, prefix: "foo", on_update: :restrict}, []} ]} assert execute_ddl(create) == [ """ - CREATE TABLE "posts" (\ - "id" INTEGER PRIMARY KEY AUTOINCREMENT, \ - "category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), \ - "category_1" INTEGER CONSTRAINT "foo_bar" REFERENCES "categories"("id"), \ - "category_2" INTEGER CONSTRAINT "posts_category_2_fkey" REFERENCES "categories"("id"), \ - "category_3" INTEGER NOT NULL CONSTRAINT "posts_category_3_fkey" REFERENCES "categories"("id") ON DELETE CASCADE, \ - "category_4" INTEGER CONSTRAINT "posts_category_4_fkey" REFERENCES "categories"("id") ON DELETE SET NULL, \ - "category_5" INTEGER CONSTRAINT "posts_category_5_fkey" REFERENCES "foo"."categories"("id") ON DELETE SET NULL, \ - "category_6" INTEGER, \ - "category_7" INTEGER, \ - FOREIGN KEY ("category_6","here") REFERENCES "categories"("id","there") ON DELETE SET NULL, \ - FOREIGN KEY ("category_7","that") REFERENCES "tags"("id","this") ON DELETE SET NULL\ - )\ - """ + CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, + "category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), + "category_1" INTEGER CONSTRAINT "foo_bar" REFERENCES "categories"("id"), + "category_2" INTEGER CONSTRAINT "posts_category_2_fkey" REFERENCES "categories"("id"), + "category_3" INTEGER NOT NULL CONSTRAINT "posts_category_3_fkey" REFERENCES "categories"("id") ON DELETE CASCADE, + "category_4" INTEGER CONSTRAINT "posts_category_4_fkey" REFERENCES "categories"("id") ON DELETE SET NULL, + "category_5" INTEGER CONSTRAINT "posts_category_5_fkey" REFERENCES 
"categories"("id"), + "category_6" INTEGER NOT NULL CONSTRAINT "posts_category_6_fkey" REFERENCES "categories"("id") ON UPDATE CASCADE, + "category_7" INTEGER CONSTRAINT "posts_category_7_fkey" REFERENCES "categories"("id") ON UPDATE SET NULL, + "category_8" INTEGER NOT NULL CONSTRAINT "posts_category_8_fkey" REFERENCES "categories"("id") ON DELETE SET NULL ON UPDATE CASCADE, + "category_9" INTEGER CONSTRAINT "posts_category_9_fkey" REFERENCES "categories"("id") ON DELETE RESTRICT, + "category_10" INTEGER CONSTRAINT "posts_category_10_fkey" REFERENCES "categories"("id") ON UPDATE RESTRICT, + "category_11" INTEGER CONSTRAINT "posts_category_11_fkey" REFERENCES "foo"."categories"("id") ON UPDATE RESTRICT) + """ + |> remove_newlines ] end @@ -2262,29 +1957,14 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do create = {:create, table(:posts, options: "WITH FOO=BAR"), [ - {:add, :id, :serial, [primary_key: true]} + {:add, :id, :serial, [primary_key: true]}, + {:add, :created_at, :naive_datetime, []} ]} - assert execute_ddl(create) == [ - """ - CREATE TABLE "posts" (\ - "id" INTEGER PRIMARY KEY AUTOINCREMENT\ - ) \ - WITH FOO=BAR\ - """ - ] - end - - test "create table with list as options" do - assert_raise( - ArgumentError, - "SQLite3 adapter does not support keyword lists in :options", - fn -> - {:create, table(:posts, options: ["WITH FOO=BAR"]), - [{:add, :id, :serial, [primary_key: true]}, {:add, :created_at, :datetime, []}]} - |> execute_ddl() - end - ) + assert execute_ddl(create) == + [ + ~s|CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "created_at" TEXT) WITH FOO=BAR| + ] end test "create table with composite key" do @@ -2298,16 +1978,55 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert execute_ddl(create) == [ """ - CREATE TABLE "posts" (\ - "a" INTEGER, \ - "b" INTEGER, \ - "name" TEXT, \ - PRIMARY KEY ("a", "b")\ - )\ + CREATE TABLE "posts" ("a" INTEGER, "b" INTEGER, "name" TEXT, PRIMARY KEY ("a","b")) + """ + |> remove_newlines + ] + end + + test "create table with binary column and UTF-8 default" do + create = {:create, table(:blobs), [{:add, :blob, :binary, [default: "foo"]}]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE "blobs" ("blob" BLOB DEFAULT 'foo') + """ + |> remove_newlines + ] + end + + test "create table with binary column and hex blob literal default" do + create = {:create, table(:blobs), [{:add, :blob, :binary, [default: "\\x666F6F"]}]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE "blobs" ("blob" BLOB DEFAULT '\\\\x666F6F') + """ + |> remove_newlines + ] + end + + test "create table with binary column and hex blob literal null-byte" do + create = {:create, table(:blobs), [{:add, :blob, :binary, [default: "\\\x00"]}]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE "blobs" ("blob" BLOB DEFAULT '\\\\\x00') """ + |> remove_newlines ] end + test "create table with a map column, and an empty map default" do + create = + {:create, table(:posts), + [ + {:add, :a, :map, [default: %{}]} + ]} + + assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT ('{}'))|] + end + test "create table with a map column, and a map default with values" do create = {:create, table(:posts), @@ -2316,21 +2035,19 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ]} assert execute_ddl(create) == [ - """ - CREATE TABLE "posts" ("a" JSON DEFAULT ('{\"baz\":\"boom\",\"foo\":\"bar\"}'))\ - """ + ~s|CREATE TABLE "posts" ("a" TEXT DEFAULT ('{"baz":"boom","foo":"bar"}'))| ] end - test "create table with sql keyword as 
column name" do - create = {:create, table(:posts), [{:add, :order, :integer, []}]} + test "create table with a map column, and a string default" do + create = + {:create, table(:posts), + [ + {:add, :a, :map, [default: ~s|{"foo":"bar","baz":"boom"}|]} + ]} assert execute_ddl(create) == [ - """ - CREATE TABLE "posts" (\ - "order" INTEGER\ - )\ - """ + ~s|CREATE TABLE "posts" ("a" TEXT DEFAULT '{"foo":"bar","baz":"boom"}')| ] end @@ -2340,12 +2057,20 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do [{:add, :published_at, :time, [precision: 3]}, {:add, :submitted_at, :time, []}]} assert execute_ddl(create) == [ - """ - CREATE TABLE "posts" (\ - "published_at" TIME, \ - "submitted_at" TIME\ - )\ - """ + ~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)| + ] + end + + test "create table with time_usec columns" do + create = + {:create, table(:posts), + [ + {:add, :published_at, :time_usec, [precision: 3]}, + {:add, :submitted_at, :time_usec, []} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)| ] end @@ -2358,12 +2083,20 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ]} assert execute_ddl(create) == [ - """ - CREATE TABLE "posts" (\ - "published_at" TEXT, \ - "submitted_at" TEXT\ - )\ - """ + ~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)| + ] + end + + test "create table with utc_datetime_usec columns" do + create = + {:create, table(:posts), + [ + {:add, :published_at, :utc_datetime_usec, [precision: 3]}, + {:add, :submitted_at, :utc_datetime_usec, []} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)| ] end @@ -2376,159 +2109,116 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ]} assert execute_ddl(create) == [ - ~s{CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)} + ~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)| + ] + end + + test "create table with naive_datetime_usec columns" do + create = + {:create, table(:posts), + [ + {:add, :published_at, :naive_datetime_usec, [precision: 3]}, + {:add, :submitted_at, :naive_datetime_usec, []} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)| ] end test "create table with an unsupported type" do - assert_raise( - ArgumentError, - fn -> - {:create, table(:posts), - [ - {:add, :a, {:a, :b, :c}, [default: %{}]} - ]} - |> execute_ddl() - end - ) + create = + {:create, table(:posts), + [ + {:add, :a, {:a, :b, :c}, [default: %{}]} + ]} + + assert_raise ArgumentError, + "unsupported type `{:a, :b, :c}`. 
" <> + "The type can either be an atom, a string or a tuple of the form " <> + "`{:map, t}` or `{:array, t}` where `t` itself follows the same conditions.", + fn -> execute_ddl(create) end end test "drop table" do - drop = {:drop, table(:posts), :restrict} - + drop = {:drop, table(:posts)} assert execute_ddl(drop) == [~s|DROP TABLE "posts"|] end - test "drop table with prefixes" do - drop = {:drop, table(:posts, prefix: :foo), :restrict} - + test "drop table with prefix" do + drop = {:drop, table(:posts, prefix: :foo)} assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] end - test "drop constraint" do - assert_raise( - ArgumentError, - ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, - fn -> - execute_ddl( - {:drop, constraint(:products, "price_must_be_positive", prefix: :foo), - :restrict} - ) - end - ) - end - - test "drop_if_exists constraint" do - assert_raise( - ArgumentError, - ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, - fn -> - execute_ddl( - {:drop_if_exists, - constraint(:products, "price_must_be_positive", prefix: :foo), :restrict} - ) - end - ) - end + test "drop table with cascade" do + drop = {:drop, table(:posts), :cascade} - test "alter table" do - alter = - {:alter, table(:posts), - [ - {:add, :title, :string, [default: "Untitled", size: 100, null: false]}, - {:add, :author_id, %Reference{table: :author}, []} - ]} + assert_raise ArgumentError, fn -> + execute_ddl(drop) + end - assert execute_ddl(alter) == [ - """ - ALTER TABLE "posts" \ - ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL\ - """, - """ - ALTER TABLE "posts" \ - ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES "author"("id")\ - """ - ] + assert_raise ArgumentError, fn -> + execute_ddl(drop) + end end - test "alter table with datetime not null" do + test "alter table" do alter = {:alter, table(:posts), [ {:add, :title, :string, [default: "Untitled", size: 100, null: false]}, - {:add, :when, :utc_datetime, [null: false]} + {:add, :author_id, %Reference{table: :author}, []}, + {:add, :category_id, %Reference{table: :categories, validate: false}, []}, + {:remove, :summary}, + {:remove, :body, :text, []}, + {:remove, :space_id, %Reference{table: :author}, []} ]} assert execute_ddl(alter) == [ - """ - ALTER TABLE "posts" \ - ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL\ - """, - """ - ALTER TABLE "posts" \ - ADD COLUMN "when" TEXT\ - """ + ~s|ALTER TABLE "posts" ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL|, + ~s|ALTER TABLE "posts" ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES "author"("id")|, + ~s|ALTER TABLE "posts" ADD COLUMN "category_id" INTEGER CONSTRAINT "posts_category_id_fkey" REFERENCES "categories"("id")|, + ~s|ALTER TABLE "posts" DROP COLUMN "summary"|, + ~s|ALTER TABLE "posts" DROP COLUMN "body"|, + ~s|ALTER TABLE "posts" DROP COLUMN "space_id"| ] end test "alter table with prefix" do alter = {:alter, table(:posts, prefix: :foo), - [ - {:add, :title, :string, [default: "Untitled", size: 100, null: false]}, - {:add, :author_id, %Reference{table: :author}, []} - ]} + [{:add, :author_id, %Reference{table: :author}, []}]} assert execute_ddl(alter) == [ """ - ALTER TABLE "foo"."posts" \ - ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL\ - """, - """ - ALTER TABLE "foo"."posts" \ - ADD COLUMN "author_id" INTEGER \ - CONSTRAINT "posts_author_id_fkey" REFERENCES "foo"."author"("id")\ + ALTER TABLE "foo"."posts" + ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES 
"foo"."author"("id") """ + |> remove_newlines ] end - test "alter column errors for :modify column" do - assert_raise( - ArgumentError, - "ALTER COLUMN not supported by SQLite3", - fn -> - {:alter, table(:posts), - [ - {:modify, :price, :numeric, [precision: 8, scale: 2]} - ]} - |> execute_ddl() - end - ) - end - - test "alter table removes column" do - alteration = { - :alter, - table(:posts), - [{:remove, :price, :numeric, [precision: 8, scale: 2]}] - } + test "alter table with serial primary key" do + alter = {:alter, table(:posts), [{:add, :my_pk, :serial, [primary_key: true]}]} - assert execute_ddl(alteration) == [ + assert execute_ddl(alter) == [ """ - ALTER TABLE "posts" \ - DROP COLUMN "price"\ + ALTER TABLE "posts" + ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT """ + |> remove_newlines ] end - test "alter table with primary key" do - alter = {:alter, table(:posts), [{:add, :my_pk, :serial, [primary_key: true]}]} + test "alter table with bigserial primary key" do + alter = {:alter, table(:posts), [{:add, :my_pk, :bigserial, [primary_key: true]}]} assert execute_ddl(alter) == [ """ - ALTER TABLE "posts" \ - ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT\ + ALTER TABLE "posts" + ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT """ + |> remove_newlines ] end @@ -2537,229 +2227,184 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert execute_ddl(create) == [ - """ - CREATE INDEX "posts_category_id_permalink_index" \ - ON "posts" ("category_id", "permalink")\ - """ + ~s|CREATE INDEX "posts_category_id_permalink_index" ON "posts" ("category_id", "permalink")| ] create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main")} - assert execute_ddl(create) == [ - """ - CREATE INDEX "posts$main" ON "posts" (lower(permalink))\ - """ - ] - end - - test "create index if not exists" do - create = {:create_if_not_exists, index(:posts, [:category_id, :permalink])} - query = execute_ddl(create) - - assert query == [ - """ - CREATE INDEX IF NOT EXISTS "posts_category_id_permalink_index" \ - ON "posts" ("category_id", "permalink")\ - """ - ] + assert execute_ddl(create) == + [~s|CREATE INDEX "posts$main" ON "posts" (lower(permalink))|] end test "create index with prefix" do create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} - assert execute_ddl(create) == [ - """ - CREATE INDEX "posts_category_id_permalink_index" \ - ON "foo"."posts" ("category_id", "permalink")\ - """ - ] + assert execute_ddl(create) == + [ + ~s|CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink")| + ] create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main", prefix: :foo)} - assert execute_ddl(create) == [ - """ - CREATE INDEX "posts$main" ON "foo"."posts" (lower(permalink))\ - """ - ] + assert execute_ddl(create) == + [~s|CREATE INDEX "posts$main" ON "foo"."posts" (lower(permalink))|] end - test "create index with comment" do + test "raise on create index with comment" do create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")} - assert execute_ddl(create) == [ - """ - CREATE INDEX "posts_category_id_permalink_index" \ - ON "foo"."posts" ("category_id", "permalink")\ - """ - ] - - # NOTE: Comments are not supported by SQLite. DDL query generator will ignore them. 
+ assert_raise ArgumentError, ~r/comment/, fn -> + execute_ddl(create) + end end - test "create unique index" do - create = {:create, index(:posts, [:permalink], unique: true)} + test "create index with comment" do + create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} assert execute_ddl(create) == [ - """ - CREATE UNIQUE INDEX "posts_permalink_index" \ - ON "posts" ("permalink")\ - """ + remove_newlines(""" + CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink") + """) ] end - test "create unique index if not exists" do - create = {:create_if_not_exists, index(:posts, [:permalink], unique: true)} - query = execute_ddl(create) + test "create unique index" do + create = {:create, index(:posts, [:permalink], unique: true)} - assert query == [ - """ - CREATE UNIQUE INDEX IF NOT EXISTS "posts_permalink_index" \ - ON "posts" ("permalink")\ - """ - ] + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink")|] end test "create unique index with condition" do create = {:create, index(:posts, [:permalink], unique: true, where: "public IS 1")} - assert execute_ddl(create) == [ - """ - CREATE UNIQUE INDEX "posts_permalink_index" \ - ON "posts" ("permalink") WHERE public IS 1\ - """ - ] + assert execute_ddl(create) == + [ + ~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public IS 1| + ] create = {:create, index(:posts, [:permalink], unique: true, where: :public)} - assert execute_ddl(create) == [ - """ - CREATE UNIQUE INDEX "posts_permalink_index" \ - ON "posts" ("permalink") WHERE public\ - """ - ] + assert execute_ddl(create) == + [ + ~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public| + ] end - test "create index concurrently" do - # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL. - create = {:create, index(:posts, [:permalink], concurrently: true)} + test "create index with include fields" do + create = {:create, index(:posts, [:permalink], unique: true, include: [:public])} - assert execute_ddl(create) == [ - ~s|CREATE INDEX "posts_permalink_index" ON "posts" ("permalink")| - ] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end - test "create unique index concurrently" do - # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL. - create = {:create, index(:posts, [:permalink], concurrently: true, unique: true)} + test "create unique index with nulls_distinct option" do + create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: true)} - assert execute_ddl(create) == [ - ~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink")| - ] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end + end + + test "create index concurrently not supported" do + index = index(:posts, [:permalink]) + create = {:create, %{index | concurrently: true}} + + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end test "create an index using a different type" do - # NOTE: SQLite doesn't support USING, so this isn't included in generated SQL. 
create = {:create, index(:posts, [:permalink], using: :hash)} - assert execute_ddl(create) == [ - ~s|CREATE INDEX "posts_permalink_index" ON "posts" ("permalink")| - ] + assert_raise ArgumentError, fn -> + execute_ddl(create) + end + end + + test "create an index without recursively creating indexes on partitions" do + create = {:create, index(:posts, [:permalink], only: true)} + + assert_raise ArgumentError, fn -> + execute_ddl(create) + end end test "drop index" do - drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} + drop = {:drop, index(:posts, [:id], name: "posts$main")} assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] end test "drop index with prefix" do - drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :restrict} + drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo)} assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] end - test "drop index if exists" do - drop = {:drop_if_exists, index(:posts, [:id], name: "posts$main"), :restrict} - assert execute_ddl(drop) == [~s|DROP INDEX IF EXISTS "posts$main"|] + test "drop index mode not supported" do + assert_raise ArgumentError, fn -> + drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} + execute_ddl(drop) + end end - test "drop index concurrently" do - # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL. - drop = - {:drop, index(:posts, [:id], name: "posts$main", concurrently: true), :restrict} + test "drop index concurrently not supported" do + index = index(:posts, [:id], name: "posts$main") - assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] + assert_raise ArgumentError, fn -> + drop = {:drop, %{index | concurrently: true}} + execute_ddl(drop) + end end - test "create check constraint" do - assert_raise( - ArgumentError, - "SQLite3 does not support ALTER TABLE ADD CONSTRAINT.", - fn -> - {:create, constraint(:products, "price_must_be_positive", check: "price > 0")} - |> execute_ddl() - end - ) + test "drop index with cascade" do + assert_raise ArgumentError, fn -> + drop = {:drop, index(:posts, [:id], name: "posts$main"), :cascade} + execute_ddl(drop) + end - assert_raise( - ArgumentError, - "SQLite3 does not support ALTER TABLE ADD CONSTRAINT.", - fn -> - {:create, - constraint(:products, "price_must_be_positive", - check: "price > 0", - prefix: "foo" - )} - |> execute_ddl() - end - ) + assert_raise ArgumentError, fn -> + drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :cascade} + execute_ddl(drop) + end end - test "create exclusion constraint" do - assert_raise( - ArgumentError, - "SQLite3 does not support ALTER TABLE ADD CONSTRAINT.", - fn -> - {:create, - constraint(:products, "price_must_be_positive", - exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)| - )} - |> execute_ddl() - end - ) + test "drop constraint" do + assert_raise ArgumentError, + ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, + fn -> + execute_ddl( + {:drop, + constraint(:products, "price_must_be_positive", prefix: :foo), + :restrict} + ) + end end - test "create constraint with comment" do - assert_raise( - ArgumentError, - "SQLite3 does not support ALTER TABLE ADD CONSTRAINT.", - fn -> - {:create, - constraint(:products, "price_must_be_positive", - check: "price > 0", - prefix: "foo", - comment: "comment" - )} - |> execute_ddl() - end - ) + test "drop_if_exists constraint" do + assert_raise ArgumentError, + ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, + fn -> + 
execute_ddl( + {:drop_if_exists, + constraint(:products, "price_must_be_positive", prefix: :foo), + :restrict} + ) + end end test "rename table" do rename = {:rename, table(:posts), table(:new_posts)} - - assert execute_ddl(rename) == [ - ~s|ALTER TABLE "posts" RENAME TO "new_posts"| - ] + assert execute_ddl(rename) == [~s|ALTER TABLE "posts" RENAME TO "new_posts"|] end test "rename table with prefix" do rename = {:rename, table(:posts, prefix: :foo), table(:new_posts, prefix: :foo)} - - assert execute_ddl(rename) == [ - ~s|ALTER TABLE "foo"."posts" RENAME TO "new_posts"| - ] + assert execute_ddl(rename) == [~s|ALTER TABLE "foo"."posts" RENAME TO "new_posts"|] end test "rename column" do @@ -2778,39 +2423,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ] end - test "drop column" do - drop_column = {:alter, table(:posts), [{:remove, :summary}]} - - assert execute_ddl(drop_column) == [ - """ - ALTER TABLE "posts" \ - DROP COLUMN "summary"\ - """ - ] - end - - test "arrays" do - assert_raise( - Ecto.QueryError, - ~r"Array type is not supported by SQLite3", - fn -> - Schema - |> select([], fragment("?", [1, 2, 3])) - |> plan() - |> all() - end - ) - end - - test "preloading" do - query = - from(p in Post, preload: [:comments], select: p) - |> plan() - |> all() - - assert query == ~s{SELECT p0."id", p0."title", p0."content" FROM "posts" AS p0} - end - test "autoincrement support" do serial = {:create, table(:posts), [{:add, :id, :serial, [primary_key: true]}]} bigserial = {:create, table(:posts), [{:add, :id, :bigserial, [primary_key: true]}]} @@ -2828,4 +2440,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert execute_ddl(id) == [~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY)/] assert execute_ddl(integer) == [~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY)/] end + + defp remove_newlines(string) do + string |> String.trim() |> String.replace("\n", " ") + end end diff --git a/test/ecto/adapters/sqlite3/pg_connection_test.exs b/test/ecto/adapters/sqlite3/pg_connection_test.exs deleted file mode 100644 index 15ecec8..0000000 --- a/test/ecto/adapters/sqlite3/pg_connection_test.exs +++ /dev/null @@ -1,1808 +0,0 @@ -defmodule Ecto.Adapters.SQLite3Test do - use ExUnit.Case, async: false - - import Ecto.Query - - alias Ecto.Queryable - alias Ecto.Adapters.SQLite3.Connection, as: SQL - - defmodule Schema do - use Ecto.Schema - - schema "schema" do - field :x, :integer - field :y, :integer - field :z, :integer - field :w, {:array, :integer} - field :meta, :map - - has_many :comments, Ecto.Adapters.SQLite3Test.Schema2, - references: :x, - foreign_key: :z - has_one :permalink, Ecto.Adapters.SQLite3Test.Schema3, - references: :y, - foreign_key: :id - end - end - - defmodule Schema2 do - use Ecto.Schema - - schema "schema2" do - belongs_to :post, Ecto.Adapters.SQLite3Test.Schema, - references: :x, - foreign_key: :z - end - end - - defmodule Schema3 do - use Ecto.Schema - - schema "schema3" do - field :list1, {:array, :string} - field :list2, {:array, :integer} - field :binary, :binary - end - end - - defp plan(query, operation \\ :all) do - {query, _cast_params, _dump_params} = Ecto.Adapter.Queryable.plan_query(operation, Ecto.Adapters.SQLite3, query) - query - end - - defp all(query), do: query |> SQL.all |> IO.iodata_to_binary() - defp update_all(query), do: query |> SQL.update_all |> IO.iodata_to_binary() - defp delete_all(query), do: query |> SQL.delete_all |> IO.iodata_to_binary() - defp execute_ddl(query), do: query |> SQL.execute_ddl |> Enum.map(&IO.iodata_to_binary/1) - - defp 
insert(prefx, table, header, rows, on_conflict, returning, placeholders \\ []) do - IO.iodata_to_binary( - SQL.insert(prefx, table, header, rows, on_conflict, returning, placeholders) - ) - end - - defp update(prefx, table, fields, filter, returning) do - IO.iodata_to_binary(SQL.update(prefx, table, fields, filter, returning)) - end - - defp delete(prefx, table, filter, returning) do - IO.iodata_to_binary(SQL.delete(prefx, table, filter, returning)) - end - - test "from" do - query = Schema |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} - end - - test "from with hints" do - query = Schema |> from(hints: "INDEXED BY FOO") |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 INDEXED BY FOO} - end - - test "from without schema" do - query = "posts" |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT p0."x" FROM "posts" AS p0} - - query = "posts" |> select([r], fragment("?", r)) |> plan() - assert all(query) == ~s{SELECT p0 FROM "posts" AS p0} - - query = "Posts" |> select([:x]) |> plan() - assert all(query) == ~s{SELECT P0."x" FROM "Posts" AS P0} - - query = "0posts" |> select([:x]) |> plan() - assert all(query) == ~s{SELECT t0."x" FROM "0posts" AS t0} - - assert_raise Ecto.QueryError, ~r"SQLite3 does not support selecting all fields from \"posts\" without a schema", fn -> - all from(p in "posts", select: p) |> plan() - end - end - - test "from with subquery" do - query = subquery("posts" |> select([r], %{x: r.x, y: r.y})) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM (SELECT sp0."x" AS "x", sp0."y" AS "y" FROM "posts" AS sp0) AS s0} - - query = subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."z" FROM (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0) AS s0} - - query = subquery(subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r)) |> select([r], r) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."z" FROM (SELECT ss0."x" AS "x", ss0."z" AS "z" FROM (SELECT ssp0."x" AS "x", ssp0."y" AS "z" FROM "posts" AS ssp0) AS ss0) AS s0} - end - - test "from with fragment" do - query = from(f in fragment("select ? as x", ^"abc"), select: f.x) |> plan() - assert all(query) == ~s{SELECT f0."x" FROM (select ? as x) AS f0} - - query = from(fragment("select ? as x", ^"abc"), select: fragment("x")) |> plan() - assert all(query) == ~s{SELECT x FROM (select ? as x) AS f0} - - query = from(f in fragment("select_rows(arg)"), select: f.x) |> plan() - assert all(query) == ~s{SELECT f0."x" FROM (select_rows(arg)) AS f0} - - assert_raise Ecto.QueryError, ~r/^SQLite3 does not support/, fn -> - all from(f in fragment("select ? as x", ^"abc"), select: f) |> plan() - end - end - - test "CTE" do - initial_query = - "categories" - |> where([c], is_nil(c.parent_id)) - |> select([c], %{id: c.id, depth: fragment("1")}) - - iteration_query = - "categories" - |> join(:inner, [c], t in "tree", on: t.id == c.parent_id) - |> select([c, t], %{id: c.id, depth: fragment("? 
+ 1", t.depth)}) - - cte_query = initial_query |> union_all(^iteration_query) - - query = - Schema - |> recursive_ctes(true) - |> with_cte("tree", as: ^cte_query) - |> join(:inner, [r], t in "tree", on: t.id == r.category_id) - |> select([r, t], %{x: r.x, category_id: t.id, depth: type(t.depth, :integer)}) - |> plan() - - assert all(query) == - ~s{WITH RECURSIVE "tree" AS } <> - ~s{(SELECT sc0."id" AS "id", 1 AS "depth" FROM "categories" AS sc0 WHERE (sc0."parent_id" IS NULL) } <> - ~s{UNION ALL } <> - ~s{(SELECT c0."id", t1."depth" + 1 FROM "categories" AS c0 } <> - ~s{INNER JOIN "tree" AS t1 ON t1."id" = c0."parent_id")) } <> - ~s{SELECT s0."x", t1."id", CAST(t1."depth" AS INTEGER) } <> - ~s{FROM "schema" AS s0 } <> - ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} - end - - @raw_sql_cte """ - SELECT * FROM categories WHERE c.parent_id IS NULL - UNION ALL - SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id - """ - - test "reference CTE in union" do - comments_scope_query = - "comments" - |> where([c], is_nil(c.deleted_at)) - |> select([c], %{entity_id: c.entity_id, text: c.text}) - - posts_query = - "posts" - |> join(:inner, [p], c in "comments_scope", on: c.entity_id == p.guid) - |> select([p, c], [p.title, c.text]) - - videos_query = - "videos" - |> join(:inner, [v], c in "comments_scope", on: c.entity_id == v.guid) - |> select([v, c], [v.title, c.text]) - - query = - posts_query - |> union_all(^videos_query) - |> with_cte("comments_scope", as: ^comments_scope_query) - |> plan() - - assert all(query) == - ~s{WITH "comments_scope" AS (} <> - ~s{SELECT sc0."entity_id" AS "entity_id", sc0."text" AS "text" } <> - ~s{FROM "comments" AS sc0 WHERE (sc0."deleted_at" IS NULL)) } <> - ~s{SELECT p0."title", c1."text" } <> - ~s{FROM "posts" AS p0 } <> - ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = p0."guid" } <> - ~s{UNION ALL } <> - ~s{(SELECT v0."title", c1."text" } <> - ~s{FROM "videos" AS v0 } <> - ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = v0."guid")} - end - - test "fragment CTE" do - query = - Schema - |> recursive_ctes(true) - |> with_cte("tree", as: fragment(@raw_sql_cte)) - |> join(:inner, [p], c in "tree", on: c.id == p.category_id) - |> select([r], r.x) - |> plan() - - assert all(query) == - ~s{WITH RECURSIVE "tree" AS (#{@raw_sql_cte}) } <> - ~s{SELECT s0."x" } <> - ~s{FROM "schema" AS s0 } <> - ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} - end - - # TODO should error on lock - test "CTE update_all" do - cte_query = - from(x in Schema, order_by: [asc: :id], limit: 10, lock: "FOR UPDATE SKIP LOCKED", select: %{id: x.id}) - - query = - Schema - |> with_cte("target_rows", as: ^cte_query) - |> join(:inner, [row], target in "target_rows", on: target.id == row.id) - |> select([r, t], r) - |> update(set: [x: 123]) - |> plan(:update_all) - - assert update_all(query) == - ~s{WITH "target_rows" AS } <> - ~s{(SELECT ss0."id" AS "id" FROM "schema" AS ss0 ORDER BY ss0."id" LIMIT 10) } <> - ~s{UPDATE "schema" AS s0 } <> - ~s{SET "x" = 123 } <> - ~s{FROM "target_rows" AS t1 } <> - ~s{WHERE (t1."id" = s0."id") } <> - ~s{RETURNING "id", "x", "y", "z", "w", "meta"} - end - - test "CTE delete_all" do - cte_query = - from(x in Schema, order_by: [asc: :id], limit: 10, inner_join: q in Schema2, on: x.x == q.z, select: %{id: x.id}) - - query = - Schema - |> with_cte("target_rows", as: ^cte_query) - |> select([r, t], r) - |> plan(:delete_all) - - assert delete_all(query) == - ~s{WITH "target_rows" AS } <> - ~s{(SELECT ss0."id" AS 
"id" FROM "schema" AS ss0 INNER JOIN "schema2" AS ss1 ON ss0."x" = ss1."z" ORDER BY ss0."id" LIMIT 10) } <> - ~s{DELETE FROM "schema" AS s0 } <> - ~s{RETURNING "id", "x", "y", "z", "w", "meta"} - end - - test "parent binding subquery and CTE" do - initial_query = - "categories" - |> where([c], c.id == parent_as(:parent_category).id) - |> select([:id, :parent_id]) - - iteration_query = - "categories" - |> join(:inner, [c], t in "tree", on: t.parent_id == c.id) - |> select([:id, :parent_id]) - - cte_query = initial_query |> union_all(^iteration_query) - - breadcrumbs_query = - "tree" - |> recursive_ctes(true) - |> with_cte("tree", as: ^cte_query) - |> select([t], %{breadcrumbs: fragment("STRING_AGG(?, ' / ')", t.id)}) - - query = - from(c in "categories", - as: :parent_category, - left_lateral_join: b in subquery(breadcrumbs_query), - select: %{id: c.id, breadcrumbs: b.breadcrumbs} - ) - |> plan() - - assert_raise Ecto.QueryError, ~r/join `:left_lateral` not supported by SQLite3/, fn -> - all(query) - end - end - - test "select" do - query = Schema |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> select([r], [r.x, r.y]) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> select([r], struct(r, [:x, :y])) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - end - - test "aggregates" do - query = Schema |> select([r], count(r.x)) |> plan() - assert all(query) == ~s{SELECT count(s0."x") FROM "schema" AS s0} - - assert_raise Ecto.QueryError, fn -> - query = Schema |> select([r], count(r.x, :distinct)) |> plan() - all(query) - end - - query = Schema |> select([r], count()) |> plan() - assert all(query) == ~s{SELECT count(*) FROM "schema" AS s0} - end - - test "aggregate filters" do - query = Schema |> select([r], count(r.x) |> filter(r.x > 10)) |> plan() - assert all(query) == ~s{SELECT count(s0."x") FILTER (WHERE s0."x" > 10) FROM "schema" AS s0} - - query = Schema |> select([r], count(r.x) |> filter(r.x > 10 and r.x < 50)) |> plan() - assert all(query) == ~s{SELECT count(s0."x") FILTER (WHERE (s0."x" > 10) AND (s0."x" < 50)) FROM "schema" AS s0} - - query = Schema |> select([r], count() |> filter(r.x > 10)) |> plan() - assert all(query) == ~s{SELECT count(*) FILTER (WHERE s0."x" > 10) FROM "schema" AS s0} - end - - test "distinct" do - query = Schema |> distinct([r], true) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct([r], false) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct(true) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT DISTINCT s0."x", s0."y" FROM "schema" AS s0} - - query = Schema |> distinct(false) |> select([r], {r.x, r.y}) |> plan() - assert all(query) == ~s{SELECT s0."x", s0."y" FROM "schema" AS s0} - - assert_raise Ecto.QueryError, ~r"DISTINCT with multiple columns is not supported by SQLite3", fn -> - query = Schema |> distinct([r], [r.x, r.y]) |> select([r], {r.x, r.y}) |> plan() - all(query) - end - end - - test "coalesce" do - query = Schema |> select([s], coalesce(s.x, 5)) |> plan() - assert all(query) == ~s{SELECT coalesce(s0."x", 5) FROM "schema" AS s0} - end - - test "where" do - query = Schema |> where([r], r.x == 42) |> where([r], r.y != 43) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT 
s0."x" FROM "schema" AS s0 WHERE (s0."x" = 42) AND (s0."y" != 43)} - - query = Schema |> where([r], {r.x, r.y} > {1, 2}) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE ((s0."x",s0."y") > (1,2))} - end - - test "or_where" do - query = Schema |> or_where([r], r.x == 42) |> or_where([r], r.y != 43) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE (s0."x" = 42) OR (s0."y" != 43)} - - query = Schema |> or_where([r], r.x == 42) |> or_where([r], r.y != 43) |> where([r], r.z == 44) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WHERE ((s0."x" = 42) OR (s0."y" != 43)) AND (s0."z" = 44)} - end - - test "order by" do - query = Schema |> order_by([r], r.x) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x"} - - query = Schema |> order_by([r], [r.x, r.y]) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x", s0."y"} - - query = Schema |> order_by([r], [asc: r.x, desc: r.y]) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x", s0."y" DESC} - - query = Schema |> order_by([r], [asc_nulls_first: r.x, desc_nulls_first: r.y]) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x" ASC NULLS FIRST, s0."y" DESC NULLS FIRST} - - query = Schema |> order_by([r], [asc_nulls_last: r.x, desc_nulls_last: r.y]) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 ORDER BY s0."x" ASC NULLS LAST, s0."y" DESC NULLS LAST} - - query = Schema |> order_by([r], []) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} - end - - test "union and union all" do - base_query = Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) - union_query1 = Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) - union_query2 = Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) - - query = base_query |> union(^union_query1) |> union(^union_query2) |> plan() - - assert all(query) == - ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{UNION (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{UNION (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> - ~s{ORDER BY rand LIMIT 5 OFFSET 10} - - query = base_query |> union_all(^union_query1) |> union_all(^union_query2) |> plan() - - assert all(query) == - ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{UNION ALL (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{UNION ALL (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> - ~s{ORDER BY rand LIMIT 5 OFFSET 10} - end - - test "except and except all" do - base_query = Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) - except_query1 = Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) - except_query2 = Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) - - query = base_query |> except(^except_query1) |> except(^except_query2) |> plan() - - assert all(query) == - ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{EXCEPT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{EXCEPT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> - 
~s{ORDER BY rand LIMIT 5 OFFSET 10} - - query = base_query |> except_all(^except_query1) |> except_all(^except_query2) |> plan() - - assert_raise Ecto.QueryError, fn -> - all(query) - end - end - - test "intersect and intersect all" do - base_query = Schema |> select([r], r.x) |> order_by(fragment("rand")) |> offset(10) |> limit(5) - intersect_query1 = Schema |> select([r], r.y) |> order_by([r], r.y) |> offset(20) |> limit(40) - intersect_query2 = Schema |> select([r], r.z) |> order_by([r], r.z) |> offset(30) |> limit(60) - - query = base_query |> intersect(^intersect_query1) |> intersect(^intersect_query2) |> plan() - - assert all(query) == - ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{INTERSECT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{INTERSECT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> - ~s{ORDER BY rand LIMIT 5 OFFSET 10} - - query = - base_query |> intersect_all(^intersect_query1) |> intersect_all(^intersect_query2) |> plan() - - assert_raise Ecto.QueryError, fn -> - all(query) - end - end - - test "limit and offset" do - query = Schema |> limit([r], 3) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3} - - query = Schema |> offset([r], 5) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 OFFSET 5} - - query = Schema |> offset([r], 5) |> limit([r], 3) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 LIMIT 3 OFFSET 5} - end - - test "lock" do - assert_raise ArgumentError, "locks are not supported by SQLite3", fn -> - Schema |> lock("FOR SHARE NOWAIT") |> select([], true) |> plan() |> all() - end - - assert_raise ArgumentError, "locks are not supported by SQLite3", fn -> - Schema |> lock([p], fragment("UPDATE on ?", p)) |> select([], true) |> plan() |> all() - end - - end - - test "string escape" do - query = "schema" |> where(foo: "'\\ ") |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM \"schema\" AS s0 WHERE (s0.\"foo\" = '''\\\\ ')} - - query = "schema" |> where(foo: "'") |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = '''')} - end - - test "binary ops" do - query = Schema |> select([r], r.x == 2) |> plan() - assert all(query) == ~s{SELECT s0."x" = 2 FROM "schema" AS s0} - - query = Schema |> select([r], r.x != 2) |> plan() - assert all(query) == ~s{SELECT s0."x" != 2 FROM "schema" AS s0} - - query = Schema |> select([r], r.x <= 2) |> plan() - assert all(query) == ~s{SELECT s0."x" <= 2 FROM "schema" AS s0} - - query = Schema |> select([r], r.x >= 2) |> plan() - assert all(query) == ~s{SELECT s0."x" >= 2 FROM "schema" AS s0} - - query = Schema |> select([r], r.x < 2) |> plan() - assert all(query) == ~s{SELECT s0."x" < 2 FROM "schema" AS s0} - - query = Schema |> select([r], r.x > 2) |> plan() - assert all(query) == ~s{SELECT s0."x" > 2 FROM "schema" AS s0} - - query = Schema |> select([r], r.x + 2) |> plan() - assert all(query) == ~s{SELECT s0."x" + 2 FROM "schema" AS s0} - end - - test "is_nil" do - query = Schema |> select([r], is_nil(r.x)) |> plan() - assert all(query) == ~s{SELECT s0."x" IS NULL FROM "schema" AS s0} - - query = Schema |> select([r], not is_nil(r.x)) |> plan() - assert all(query) == ~s{SELECT NOT (s0."x" IS NULL) FROM "schema" AS s0} - - query = "schema" |> select([r], r.x == is_nil(r.y)) |> plan() - assert all(query) == ~s{SELECT s0."x" = (s0."y" IS NULL) FROM "schema" AS s0} - end - - test "fragments" 
do - query = Schema |> select([r], fragment("now")) |> plan() - assert all(query) == ~s{SELECT now FROM "schema" AS s0} - - query = Schema |> select([r], fragment("fun(?)", r)) |> plan() - assert all(query) == ~s{SELECT fun(s0) FROM "schema" AS s0} - - query = Schema |> select([r], fragment("downcase(?)", r.x)) |> plan() - assert all(query) == ~s{SELECT downcase(s0."x") FROM "schema" AS s0} - - query = Schema |> select([r], fragment("? COLLATE ?", r.x, literal(^"es_ES"))) |> plan() - assert all(query) == ~s{SELECT s0."x" COLLATE "es_ES" FROM "schema" AS s0} - - value = 13 - query = Schema |> select([r], fragment("downcase(?, ?)", r.x, ^value)) |> plan() - assert all(query) == ~s{SELECT downcase(s0."x", ?) FROM "schema" AS s0} - - query = Schema |> select([], fragment(title: 2)) |> plan() - assert_raise Ecto.QueryError, fn -> - all(query) - end - end - - test "literals" do - query = "schema" |> where(foo: true) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 1)} - - query = "schema" |> where(foo: false) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 0)} - - query = "schema" |> where(foo: "abc") |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 'abc')} - - query = "schema" |> where(foo: <<0,?a,?b,?c>>) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = x'00616263')} - - query = "schema" |> where(foo: 123) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 123)} - - query = "schema" |> where(foo: 123.0) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = CAST(123.0 AS REAL))} - end - - test "aliasing a selected value with selected_as/2" do - query = "schema" |> select([s], selected_as(s.x, :integer)) |> plan() - assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0} - - query = "schema" |> select([s], s.x |> coalesce(0) |> sum() |> selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT sum(coalesce(s0."x", 0)) AS "integer" FROM "schema" AS s0} - end - - test "group_by can reference the alias of a selected value with selected_as/1" do - query = "schema" |> select([s], selected_as(s.x, :integer)) |> group_by(selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 GROUP BY "integer"} - end - - test "order_by can reference the alias of a selected value with selected_as/1" do - query = "schema" |> select([s], selected_as(s.x, :integer)) |> order_by(selected_as(:integer)) |> plan() - assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 ORDER BY "integer"} - - query = "schema" |> select([s], selected_as(s.x, :integer)) |> order_by([desc: selected_as(:integer)]) |> plan() - assert all(query) == ~s{SELECT s0."x" AS "integer" FROM "schema" AS s0 ORDER BY "integer" DESC} - end - - test "datetime_add" do - query = "schema" |> where([s], datetime_add(s.foo, 1, "month") > s.bar) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(s0."foo",'1 month') > s0."bar")} - - query = "schema" |> where([s], datetime_add(type(s.foo, :string), 1, "month") > s.bar) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(CAST(s0."foo" AS TEXT),'1 month') > s0."bar")} - end - - test "tagged type" do - query = Schema |> 
select([t], type(t.x + t.y, :integer)) |> plan() - assert all(query) == ~s{SELECT CAST(s0."x" + s0."y" AS INTEGER) FROM "schema" AS s0} - - query = Schema |> select([], type(^"601d74e4-a8d3-4b6e-8365-eddb4c893327", Ecto.UUID)) |> plan() - assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} - - query = Schema |> select([], type(^["601d74e4-a8d3-4b6e-8365-eddb4c893327"], {:array, Ecto.UUID})) |> plan() - assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM "schema" AS s0} - end - - test "json_extract_path" do - query = Schema |> select([s], json_extract_path(s.meta, [0, 1])) |> plan() - assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$[0][1]') FROM "schema" AS s0| - - query = Schema |> select([s], json_extract_path(s.meta, ["a", "b"])) |> plan() - assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$.a.b') FROM "schema" AS s0| - - query = Schema |> select([s], json_extract_path(s.meta, ["'a"])) |> plan() - assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$.''a') FROM "schema" AS s0| - - query = Schema |> select([s], json_extract_path(s.meta, ["\"a"])) |> plan() - assert all(query) == ~s|SELECT json_extract(s0.\"meta\", '$.\\\"a') FROM "schema" AS s0| - end - - test "optimized json_extract_path" do - query = Schema |> where([s], s.meta["id"] == 123) |> select(true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0.\"meta\", '$.id') = 123)| - - query = Schema |> where([s], s.meta["id"] == "123") |> select(true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.id') = '123')| - - query = Schema |> where([s], s.meta["tags"][0]["name"] == "123") |> select(true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.tags[0].name') = '123')| - - query = Schema |> where([s], s.meta[0] == "123") |> select(true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0.\"meta\", '$[0]') = '123')| - - query = Schema |> where([s], s.meta["enabled"] == true) |> select(true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.enabled') = 1)| - - query = Schema |> where([s], s.meta["extra"][0]["enabled"] == false) |> select(true) |> plan() - assert all(query) == ~s|SELECT 1 FROM "schema" AS s0 WHERE (json_extract(s0."meta", '$.extra[0].enabled') = 0)| - end - - test "nested expressions" do - z = 123 - query = from(r in Schema, []) |> select([r], r.x > 0 and (r.y > ^(-z)) or true) |> plan() - assert all(query) == ~s{SELECT ((s0."x" > 0) AND (s0."y" > ?)) OR 1 FROM "schema" AS s0} - end - - test "in expression" do - query = Schema |> select([e], 1 in []) |> plan() - assert all(query) == ~s{SELECT 0 FROM "schema" AS s0} - - query = - Schema - |> select([e], 1 in ^[]) - |> plan() - - assert all(query) == ~s{SELECT 0 FROM "schema" AS s0} - - query = - Schema - |> select([e], 1 in ^[1, 2, 3]) - |> plan() - - assert all(query) == ~s{SELECT 1 IN (?,?,?) FROM "schema" AS s0} - - query = - Schema - |> select([e], 1 in [1, ^2, 3]) - |> plan() - - assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM "schema" AS s0} - - query = - Schema - |> select([e], e.x == ^0 or e.x in ^[1, 2, 3] or e.x == ^4) - |> plan() - - assert all(query) == - ~s{SELECT ((s0."x" = ?) OR s0."x" IN (?,?,?)) OR (s0."x" = ?) 
FROM "schema" AS s0} - - query = - Schema - |> select([e], e in [1, 2, 3]) - |> plan() - - assert all(query) == ~s{SELECT s0 IN (1,2,3) FROM "schema" AS s0} - - - query = Schema |> select([e], 1 in [1,e.x,3]) |> plan() - assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} - - query = Schema |> select([e], 1 in [1, ^2, 3]) |> plan() - assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM "schema" AS s0} - - query = Schema |> select([e], ^1 in [1, ^2, 3]) |> plan() - assert all(query) == ~s{SELECT ? IN (1,?,3) FROM "schema" AS s0} - - end - - test "in subquery" do - posts = subquery("posts" |> where(title: ^"hello") |> select([p], p.id)) - query = "comments" |> where([c], c.post_id in subquery(posts)) |> select([c], c.x) |> plan() - assert all(query) == - ~s{SELECT c0."x" FROM "comments" AS c0 } <> - ~s{WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = ?)))} - - posts = subquery("posts" |> where(title: parent_as(:comment).subtitle) |> select([p], p.id)) - query = "comments" |> from(as: :comment) |> where([c], c.post_id in subquery(posts)) |> select([c], c.x) |> plan() - assert all(query) == - ~s{SELECT c0."x" FROM "comments" AS c0 } <> - ~s{WHERE (c0."post_id" IN (SELECT sp0."id" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")))} - end - - test "having" do - query = Schema |> having([p], p.x == p.x) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} - - query = Schema |> having([p], p.x == p.x) |> having([p], p.y == p.y) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") AND (s0."y" = s0."y")} - end - - test "or_having" do - query = Schema |> or_having([p], p.x == p.x) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} - - query = Schema |> or_having([p], p.x == p.x) |> or_having([p], p.y == p.y) |> select([], true) |> plan() - assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") OR (s0."y" = s0."y")} - end - - test "group by" do - query = Schema |> group_by([r], r.x) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x"} - - query = Schema |> group_by([r], 2) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY 2} - - query = Schema |> group_by([r], [r.x, r.y]) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x", s0."y"} - - query = Schema |> group_by([r], []) |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} - end - - - test "arrays and sigils" do - query = Schema |> select([], fragment("?", [1, 2, 3])) |> plan() - assert all(query) == ~s{SELECT JSON_ARRAY('[1,2,3]') FROM "schema" AS s0} - - query = Schema |> select([], fragment("?", ~w(abc def))) |> plan() - assert all(query) == ~s{SELECT JSON_ARRAY('["abc","def"]') FROM "schema" AS s0} - - query = Schema |> where([s], s.w == []) |> select([s], s.w) |> plan() - assert all(query) == ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = JSON_ARRAY('[]'))} - end - - - test "interpolated values" do - cte1 = "schema1" |> select([m], %{id: m.id, smth: ^true}) |> where([], fragment("?", ^1)) - union = "schema1" |> select([m], {m.id, ^true}) |> where([], fragment("?", ^5)) - union_all = "schema2" |> select([m], {m.id, ^false}) |> where([], fragment("?", ^6)) - - query = "schema" - |> with_cte("cte1", as: 
^cte1) - |> with_cte("cte2", as: fragment("SELECT * FROM schema WHERE ?", ^2)) - |> select([m], {m.id, ^true}) - |> join(:inner, [], Schema2, on: fragment("?", ^true)) - |> join(:inner, [], Schema2, on: fragment("?", ^false)) - |> where([], fragment("?", ^true)) - |> where([], fragment("?", ^false)) - |> having([], fragment("?", ^true)) - |> having([], fragment("?", ^false)) - |> group_by([], fragment("?", ^3)) - |> group_by([], fragment("?", ^4)) - |> union(^union) - |> union_all(^union_all) - |> order_by([], fragment("?", ^7)) - |> limit([], ^8) - |> offset([], ^9) - |> plan() - - result = - "WITH \"cte1\" AS (SELECT ss0.\"id\" AS \"id\", ? AS \"smth\" FROM \"schema1\" AS ss0 WHERE (?)), " <> - "\"cte2\" AS (SELECT * FROM schema WHERE ?) " <> - "SELECT s0.\"id\", ? FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON ? " <> - "INNER JOIN \"schema2\" AS s2 ON ? WHERE (?) AND (?) " <> - "GROUP BY ?, ? HAVING (?) AND (?) " <> - "UNION (SELECT s0.\"id\", ? FROM \"schema1\" AS s0 WHERE (?)) " <> - "UNION ALL (SELECT s0.\"id\", ? FROM \"schema2\" AS s0 WHERE (?)) " <> - "ORDER BY ? LIMIT ? OFFSET ?" - - assert all(query) == String.trim(result) - end - - test "order_by and types" do - query = "schema3" |> order_by([e], type(fragment("?", e.binary), ^:decimal)) |> select(true) |> plan() - assert all(query) == "SELECT 1 FROM \"schema3\" AS s0 ORDER BY CAST(s0.\"binary\" AS REAL)" - end - - test "fragments and types" do - query = - plan from(e in "schema", - where: fragment("extract(? from ?) = ?", ^"month", e.start_time, type(^"4", :integer)), - where: fragment("extract(? from ?) = ?", ^"year", e.start_time, type(^"2015", :integer)), - select: true) - - result = - "SELECT 1 FROM \"schema\" AS s0 " <> - "WHERE (extract(? from s0.\"start_time\") = CAST(? AS INTEGER)) " <> - "AND (extract(? from s0.\"start_time\") = CAST(? AS INTEGER))" - - assert all(query) == String.trim(result) - end - - test "fragments allow ? to be escaped with backslash" do - query = - plan from(e in "schema", - where: fragment("? 
= \"query\\?\"", e.start_time), - select: true) - - result = - "SELECT 1 FROM \"schema\" AS s0 " <> - "WHERE (s0.\"start_time\" = \"query?\")" - - assert all(query) == String.trim(result) - end - - test "build_explain_query" do - assert SQL.build_explain_query("SELECT 1", :query_plan) == "EXPLAIN QUERY PLAN SELECT 1" - assert SQL.build_explain_query("SELECT 1", :instructions) == "EXPLAIN SELECT 1" - end - - ## *_all - - test "update all" do - query = from(m in Schema, update: [set: [x: 0]]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = 0} - - query = from(m in Schema, update: [set: [x: 0], inc: [y: 1, z: -3]]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = 0, "y" = "y" + 1, "z" = "z" + -3} - - query = from(e in Schema, where: e.x == 123, update: [set: [x: 0]]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = 0 WHERE (s0."x" = 123)} - - query = from(m in Schema, update: [set: [x: ^0]]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = ?} - - query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) - |> update([_], set: [x: 0]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = 0 FROM "schema2" AS s1 WHERE (s0."x" = s1."z")} - - query = from(e in Schema, where: e.x == 123, update: [set: [x: 0]], - join: q in Schema2, on: e.x == q.z) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = 0 FROM "schema2" AS s1 } <> - ~s{WHERE (s0."x" = s1."z") AND (s0."x" = 123)} - end - - test "update all with returning" do - query = from(m in Schema, update: [set: [x: 0]]) |> select([m], m) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = 0 RETURNING "id", "x", "y", "z", "w", "meta"} - - query = from(m in Schema, update: [set: [x: ^1]]) |> where([m], m.x == ^2) |> select([m], m.x == ^3) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "x" = ? WHERE (s0."x" = ?) RETURNING "x" = ?} - end - - test "update all array ops" do - query = from(m in Schema, update: [push: [w: 0]]) |> plan(:update_all) - assert update_all(query) == ~s{UPDATE "schema" AS s0 SET "w" = JSON_INSERT("w",'$[#]',0)} - - query = from(m in Schema, update: [pull: [w: 0]]) |> plan(:update_all) - assert_raise Ecto.QueryError, fn -> - update_all(query) - end - end - - test "update all with subquery" do - sub = from(p in Schema, where: p.x > ^10) - - query = - Schema - |> join(:inner, [p], p2 in subquery(sub), on: p.id == p2.id) - |> update([_], set: [x: ^100]) - - {planned_query, cast_params, dump_params} = - Ecto.Adapter.Queryable.plan_query(:update_all, Ecto.Adapters.SQLite3, query) - - assert update_all(planned_query) == - ~s{UPDATE "schema" AS s0 SET "x" = ? 
FROM } <> - ~s{(SELECT ss0."id" AS "id", ss0."x" AS "x", ss0."y" AS "y", ss0."z" AS "z", ss0."w" AS "w", ss0."meta" AS "meta" FROM "schema" AS ss0 WHERE (ss0."x" > ?)) } <> - ~s{AS s1 WHERE (s0."id" = s1."id")} - - assert cast_params == [100, 10] - assert dump_params == [100, 10] - end - - test "update all with prefix" do - query = from(m in Schema, update: [set: [x: 0]]) |> Map.put(:prefix, "prefix") |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "prefix"."schema" AS s0 SET "x" = 0} - - query = from(m in Schema, prefix: "first", update: [set: [x: 0]]) |> Map.put(:prefix, "prefix") |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} - end - - # TODO this is broken? - test "update all with left join" do - query = from(m in Schema, join: x in assoc(m, :comments), left_join: p in assoc(m, :permalink), update: [set: [w: m.list2]]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "w" = s0."list2" FROM "schema2" AS s1 LEFT OUTER JOIN "schema3" AS s2 ON s2."id" = s0."y" WHERE (s1."z" = s0."x")} - end - - test "delete all" do - query = Schema |> Queryable.to_query |> plan() - assert delete_all(query) == ~s{DELETE FROM "schema" AS s0} - - query = from(e in Schema, where: e.x == 123) |> plan() - assert delete_all(query) == - ~s{DELETE FROM "schema" AS s0 WHERE (s0."x" = 123)} - - query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> plan() - assert_raise ArgumentError, fn -> - delete_all(query) - end - - query = from(e in Schema, where: e.x == 123, join: q in Schema2, on: e.x == q.z) |> plan() - assert_raise ArgumentError, fn -> - delete_all(query) - end - - query = from(e in Schema, where: e.x == 123, join: assoc(e, :comments), join: assoc(e, :permalink)) |> plan() - assert_raise ArgumentError, fn -> - delete_all(query) - end - end - - test "delete all with returning" do - query = Schema |> Queryable.to_query |> select([m], m) |> plan() - assert delete_all(query) == ~s{DELETE FROM "schema" AS s0 RETURNING "id", "x", "y", "z", "w", "meta"} - end - - test "delete all with prefix" do - query = Schema |> Queryable.to_query |> Map.put(:prefix, "prefix") |> plan() - assert delete_all(query) == ~s{DELETE FROM "prefix"."schema" AS s0} - - query = Schema |> from(prefix: "first") |> Map.put(:prefix, "prefix") |> plan() - assert delete_all(query) == ~s{DELETE FROM "first"."schema" AS s0} - end - - ## Partitions and windows - - describe "windows and partitions" do - test "one window" do - query = Schema - |> select([r], r.x) - |> windows([r], w: [partition_by: r.x]) - |> plan - - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")} - end - - test "two windows" do - query = Schema - |> select([r], r.x) - |> windows([r], w1: [partition_by: r.x], w2: [partition_by: r.y]) - |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w1" AS (PARTITION BY s0."x"), "w2" AS (PARTITION BY s0."y")} - end - - test "count over window" do - query = Schema - |> windows([r], w: [partition_by: r.x]) - |> select([r], count(r.x) |> over(:w)) - |> plan() - assert all(query) == ~s{SELECT count(s0."x") OVER "w" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x")} - end - - test "count over all" do - query = Schema - |> select([r], count(r.x) |> over) - |> plan() - assert all(query) == ~s{SELECT count(s0."x") OVER () FROM "schema" AS s0} - end - - test "row_number over all" do - query = Schema - |> select(row_number |> over) - |> plan() - assert all(query) == 
~s{SELECT row_number() OVER () FROM "schema" AS s0} - end - - test "nth_value over all" do - query = Schema - |> select([r], nth_value(r.x, 42) |> over) - |> plan() - assert all(query) == ~s{SELECT nth_value(s0."x", 42) OVER () FROM "schema" AS s0} - end - - test "lag/2 over all" do - query = Schema - |> select([r], lag(r.x, 42) |> over) - |> plan() - assert all(query) == ~s{SELECT lag(s0."x", 42) OVER () FROM "schema" AS s0} - end - - test "custom aggregation over all" do - query = Schema - |> select([r], fragment("custom_function(?)", r.x) |> over) - |> plan() - assert all(query) == ~s{SELECT custom_function(s0."x") OVER () FROM "schema" AS s0} - end - - test "partition by and order by on window" do - query = Schema - |> windows([r], w: [partition_by: [r.x, r.z], order_by: r.x]) - |> select([r], r.x) - |> plan() - assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 WINDOW "w" AS (PARTITION BY s0."x", s0."z" ORDER BY s0."x")} - end - - test "partition by ond order by over" do - query = Schema - |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x)) - - query = query |> plan() - assert all(query) == ~s{SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x") FROM "schema" AS s0} - end - - test "frame clause" do - query = Schema - |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x, frame: fragment("ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING"))) - - query = query |> plan() - assert all(query) == ~s{SELECT count(s0."x") OVER (PARTITION BY s0."x", s0."z" ORDER BY s0."x" ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) FROM "schema" AS s0} - end - end - - ## Joins - - test "join" do - query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> select([], true) |> plan() - assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z"} - - query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) - |> join(:inner, [], Schema, on: true) |> select([], true) |> plan() - assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s0."x" = s1."z" } <> - ~s{INNER JOIN "schema" AS s2 ON 1} - end - - test "join with hints" do - assert_raise Ecto.QueryError, ~r/join hints are not supported by SQLite3/, fn -> - Schema - |> join(:inner, [p], q in Schema2, hints: ["USE INDEX FOO", "USE INDEX BAR"]) - |> select([], true) - |> plan() - |> all() - end - end - - test "join with nothing bound" do - query = Schema |> join(:inner, [], q in Schema2, on: q.z == q.z) |> select([], true) |> plan() - assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON s1."z" = s1."z"} - end - - test "join without schema" do - query = "posts" |> join(:inner, [p], q in "comments", on: p.x == q.z) |> select([], true) |> plan() - assert all(query) == - ~s{SELECT 1 FROM "posts" AS p0 INNER JOIN "comments" AS c1 ON p0."x" = c1."z"} - end - - test "join with subquery" do - posts = subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, y: r.y})) - query = "comments" |> join(:inner, [c], p in subquery(posts), on: true) |> select([_, p], p.x) |> plan() - assert all(query) == - ~s{SELECT s1."x" FROM "comments" AS c0 } <> - ~s{INNER JOIN (SELECT sp0."x" AS "x", sp0."y" AS "y" FROM "posts" AS sp0 WHERE (sp0."title" = ?)) AS s1 ON 1} - - posts = subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, z: r.y})) - query = "comments" |> join(:inner, [c], p in subquery(posts), on: true) |> select([_, p], p) |> plan() - assert all(query) == - ~s{SELECT s1."x", 
s1."z" FROM "comments" AS c0 } <> - ~s{INNER JOIN (SELECT sp0."x" AS "x", sp0."y" AS "z" FROM "posts" AS sp0 WHERE (sp0."title" = ?)) AS s1 ON 1} - - posts = subquery("posts" |> where(title: parent_as(:comment).subtitle) |> select([r], r.title)) - query = "comments" |> from(as: :comment) |> join(:inner, [c], p in subquery(posts)) |> select([_, p], p) |> plan() - assert all(query) == - ~s{SELECT s1."title" FROM "comments" AS c0 } <> - ~s{INNER JOIN (SELECT sp0."title" AS "title" FROM "posts" AS sp0 WHERE (sp0."title" = c0."subtitle")) AS s1 ON 1} - end - - test "join with prefix" do - query = Schema |> join(:inner, [p], q in Schema2, on: p.x == q.z) |> select([], true) |> Map.put(:prefix, "prefix") |> plan() - assert all(query) == - ~s{SELECT 1 FROM "prefix"."schema" AS s0 INNER JOIN "prefix"."schema2" AS s1 ON s0."x" = s1."z"} - - query = Schema |> from(prefix: "first") |> join(:inner, [p], q in Schema2, on: p.x == q.z, prefix: "second") |> select([], true) |> Map.put(:prefix, "prefix") |> plan() - assert all(query) == - ~s{SELECT 1 FROM "first"."schema" AS s0 INNER JOIN "second"."schema2" AS s1 ON s0."x" = s1."z"} - end - - test "join with fragment" do - query = Schema - |> join(:inner, [p], q in fragment("SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?", p.x, ^10)) - |> select([p], {p.id, ^0}) - |> where([p], p.id > 0 and p.id < ^100) - |> plan() - assert all(query) == - ~s{SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN } <> - ~s{(SELECT * FROM schema2 AS s2 WHERE s2.id = s0."x" AND s2.field = ?) AS f1 ON 1 } <> - ~s{WHERE ((s0."id" > 0) AND (s0."id" < ?))} - end - - test "join with fragment and on defined" do - query = Schema - |> join(:inner, [p], q in fragment("SELECT * FROM schema2"), on: q.id == p.id) - |> select([p], {p.id, ^0}) - |> plan() - assert all(query) == - ~s{SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN } <> - ~s{(SELECT * FROM schema2) AS f1 ON f1."id" = s0."id"} - end - - test "join with query interpolation" do - inner = Ecto.Queryable.to_query(Schema2) - query = from(p in Schema, left_join: c in ^inner, select: {p.id, c.id}) |> plan() - assert all(query) == - "SELECT s0.\"id\", s1.\"id\" FROM \"schema\" AS s0 LEFT OUTER JOIN \"schema2\" AS s1 ON 1" - end - - test "lateral join with fragment" do - query = Schema - |> join(:inner_lateral, [p], q in fragment("SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?", p.x, ^10)) - |> select([p, q], {p.id, q.z}) - |> where([p], p.id > 0 and p.id < ^100) - |> plan() - assert_raise Ecto.QueryError, fn -> - all(query) - end - end - - test "cross lateral join with fragment" do - query = Schema - |> join(:cross_lateral, [p], q in fragment("SELECT * FROM schema2 AS s2 WHERE s2.id = ? 
AND s2.field = ?", p.x, ^10)) - |> select([p, q], {p.id, q.z}) - |> where([p], p.id > 0 and p.id < ^100) - |> plan() - assert_raise Ecto.QueryError, fn -> - all(query) - end - end - - test "cross join" do - query = from(p in Schema, cross_join: c in Schema2, select: {p.id, c.id}) |> plan() - assert all(query) == - "SELECT s0.\"id\", s1.\"id\" FROM \"schema\" AS s0 CROSS JOIN \"schema2\" AS s1" - end - - test "cross join with fragment" do - query = from(p in Schema, cross_join: fragment("json_each(?)", p.j), select: {p.id}) |> plan() - assert all(query) == - ~s{SELECT s0."id" FROM "schema" AS s0 CROSS JOIN json_each(s0."j") AS f1} - end - - test "join produces correct bindings" do - query = from(p in Schema, join: c in Schema2, on: true) - query = from(p in query, join: c in Schema2, on: true, select: {p.id, c.id}) - query = plan(query) - assert all(query) == - "SELECT s0.\"id\", s2.\"id\" FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON 1 INNER JOIN \"schema2\" AS s2 ON 1" - end - - describe "query interpolation parameters" do - test "self join on subquery" do - subquery = select(Schema, [r], %{x: r.x, y: r.y}) - query = subquery |> join(:inner, [c], p in subquery(subquery), on: true) |> plan() - assert all(query) == - ~s{SELECT s0."x", s0."y" FROM "schema" AS s0 INNER JOIN } <> - ~s{(SELECT ss0."x" AS "x", ss0."y" AS "y" FROM "schema" AS ss0) } <> - ~s{AS s1 ON 1} - end - - test "self join on subquery with fragment" do - subquery = select(Schema, [r], %{string: fragment("downcase(?)", ^"string")}) - query = subquery |> join(:inner, [c], p in subquery(subquery), on: true) |> plan() - assert all(query) == - ~s{SELECT downcase(?) FROM "schema" AS s0 INNER JOIN } <> - ~s{(SELECT downcase(?) AS "string" FROM "schema" AS ss0) } <> - ~s{AS s1 ON 1} - end - - test "join on subquery with simple select" do - subquery = select(Schema, [r], %{x: ^999, w: ^888}) - query = Schema - |> select([r], %{y: ^666}) - |> join(:inner, [c], p in subquery(subquery), on: true) - |> where([a, b], a.x == ^111) - |> plan() - - assert all(query) == - ~s{SELECT ? FROM "schema" AS s0 INNER JOIN } <> - ~s{(SELECT ? AS "x", ? AS "w" FROM "schema" AS ss0) AS s1 ON 1 } <> - ~s{WHERE (s0."x" = ?)} - end - end - - ## Associations - - test "association join belongs_to" do - query = Schema2 |> join(:inner, [c], p in assoc(c, :post)) |> select([], true) |> plan() - assert all(query) == - "SELECT 1 FROM \"schema2\" AS s0 INNER JOIN \"schema\" AS s1 ON s1.\"x\" = s0.\"z\"" - end - - test "association join has_many" do - query = Schema |> join(:inner, [p], c in assoc(p, :comments)) |> select([], true) |> plan() - assert all(query) == - "SELECT 1 FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON s1.\"z\" = s0.\"x\"" - end - - test "association join has_one" do - query = Schema |> join(:inner, [p], pp in assoc(p, :permalink)) |> select([], true) |> plan() - assert all(query) == - "SELECT 1 FROM \"schema\" AS s0 INNER JOIN \"schema3\" AS s1 ON s1.\"id\" = s0.\"y\"" - end - - # Schema based - - test "insert" do - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [:id]) - assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) 
RETURNING "id"} - - assert_raise ArgumentError, fn -> - insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id]) - end - - assert_raise ArgumentError, fn -> - insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id], [1, 2]) - end - - query = insert(nil, "schema", [], [[]], {:raise, [], []}, [:id]) - assert query == ~s{INSERT INTO "schema" DEFAULT VALUES RETURNING "id"} - - query = insert(nil, "schema", [], [[]], {:raise, [], []}, []) - assert query == ~s{INSERT INTO "schema" DEFAULT VALUES} - - query = insert("prefix", "schema", [], [[]], {:raise, [], []}, []) - assert query == ~s{INSERT INTO "prefix"."schema" DEFAULT VALUES} - end - - test "insert with on conflict" do - # For :nothing - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], []}, []) - assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT DO NOTHING} - - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], [:x, :y]}, []) - assert query == ~s{INSERT INTO "schema" ("x","y") VALUES (?,?) ON CONFLICT ("x","y") DO NOTHING} - - # For :update - update = from("schema", update: [set: [z: "foo"]]) |> plan(:update_all) - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z]) - assert query == ~s{INSERT INTO "schema" AS s0 ("x","y") VALUES (?,?) ON CONFLICT ("x","y") DO UPDATE SET "z" = 'foo' RETURNING "z"} - - assert_raise ArgumentError, "Upsert in SQLite3 requires :conflict_target", fn -> - conflict_target = [] - insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], conflict_target}, []) - end - - assert_raise ArgumentError, "Upsert in SQLite3 does not support ON CONSTRAINT", fn -> - insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], {:constraint, :foo}}, []) - end - - query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], [:id]}, []) - - assert query == - """ - INSERT INTO "schema" ("x","y") \ - VALUES (?,?) \ - ON CONFLICT ("id") \ - DO UPDATE SET "x" = EXCLUDED."x","y" = EXCLUDED."y"\ - """ - end - - test "insert with query" do - query = from("schema", select: [:id]) |> plan(:all) - assert_raise ArgumentError, fn -> - insert(nil, "schema", [:x, :y, :z], [[:x, {query, 3}, :z], [nil, {query, 2}, :z]], {:raise, [], []}, [:id]) - end - - end - - test "insert with query as rows" do - query = from(s in "schema", select: %{ foo: fragment("3"), bar: s.bar }) |> plan(:all) - - assert_raise ArgumentError, fn -> - insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [:foo]) - end - - query = from(s in "schema", select: %{ foo: fragment("3"), bar: s.bar }, where: true) |> plan(:all) - query = insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [:foo]) - assert query == ~s{INSERT INTO "schema" ("foo","bar") SELECT 3, s0."bar" FROM "schema" AS s0 WHERE (1) RETURNING "foo"} - end - - test "update" do - query = update(nil, "schema", [:x, :y], [id: 1], []) - assert query == ~s{UPDATE "schema" SET "x" = ?, "y" = ? WHERE "id" = ?} - - query = update(nil, "schema", [:x, :y], [id: 1], [:z]) - assert query == ~s{UPDATE "schema" SET "x" = ?, "y" = ? WHERE "id" = ? RETURNING "z"} - - query = update("prefix", "schema", [:x, :y], [id: 1], []) - assert query == ~s{UPDATE "prefix"."schema" SET "x" = ?, "y" = ? WHERE "id" = ?} - - query = update("prefix", "schema", [:x, :y], [id: 1, updated_at: nil], []) - assert query == ~s{UPDATE "prefix"."schema" SET "x" = ?, "y" = ? WHERE "id" = ? 
AND "updated_at" IS NULL} - end - - test "delete" do - query = delete(nil, "schema", [x: 1, y: 2], []) - assert query == ~s{DELETE FROM "schema" WHERE "x" = ? AND "y" = ?} - - query = delete(nil, "schema", [x: 1, y: 2], [:z]) - assert query == ~s{DELETE FROM "schema" WHERE "x" = ? AND "y" = ? RETURNING "z"} - - query = delete("prefix", "schema", [x: 1, y: 2], []) - assert query == ~s{DELETE FROM "prefix"."schema" WHERE "x" = ? AND "y" = ?} - - query = delete("prefix", "schema", [x: nil, y: 1], []) - assert query == ~s{DELETE FROM "prefix"."schema" WHERE "x" IS NULL AND "y" = ?} - end - - # DDL - - alias Ecto.Migration.Reference - import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, constraint: 3] - - test "executing a string during migration" do - assert execute_ddl("example") == ["example"] - end - - test "create table" do - create = {:create, table(:posts), - [{:add, :name, :string, [default: "Untitled", size: 20, null: false]}, - {:add, :price, :numeric, [precision: 8, scale: 2, default: {:fragment, "expr"}]}, - {:add, :on_hand, :integer, [default: 0, null: true]}, - {:add, :is_active, :boolean, [default: true]}, - {:add, :tags, {:array, :string}, [default: []]}, - {:add, :languages, {:array, :string}, [default: ["pt", "es"]]}, - {:add, :limits, {:array, :integer}, [default: [100, 30_000]]}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "posts" ("name" TEXT DEFAULT 'Untitled' NOT NULL, - "price" NUMERIC DEFAULT expr, - "on_hand" INTEGER DEFAULT 0 NULL, - "is_active" INTEGER DEFAULT true, - "tags" TEXT DEFAULT ('[]'), - "languages" TEXT DEFAULT ('["pt","es"]'), - "limits" TEXT DEFAULT ('[100,30000]')) - """ |> remove_newlines] - end - - test "create table with prefix" do - create = {:create, table(:posts, prefix: :foo), - [{:add, :category_0, %Reference{table: :categories}, []}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "foo"."posts" - ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "foo"."categories"("id")) - """ |> remove_newlines] - end - - test "raise on table comment" do - create = {:create, table(:posts, comment: "comment"), - [ - {:add, :category_0, %Reference{table: :categories}, [comment: "column comment"]}, - {:add, :created_at, :timestamp, []}, - {:add, :updated_at, :timestamp, [comment: "column comment 2"]} - ]} - - assert_raise ArgumentError, ~r/comment/, fn -> - execute_ddl(create) - end - end - - # TODO should we raise on comment? 
- test "create table with comment on columns" do - create = {:create, table(:posts), - [ - {:add, :category_0, %Reference{table: :categories}, []}, - {:add, :created_at, :timestamp, []}, - {:add, :updated_at, :timestamp, []} - ]} - assert execute_ddl(create) == [remove_newlines(""" - CREATE TABLE "posts" - ("category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), "created_at" TEXT, "updated_at" TEXT) - """) - ] - end - - test "create table with references" do - create = {:create, table(:posts), - [{:add, :id, :serial, [primary_key: true]}, - {:add, :category_0, %Reference{table: :categories}, []}, - {:add, :category_1, %Reference{table: :categories, name: :foo_bar}, []}, - {:add, :category_2, %Reference{table: :categories, on_delete: :nothing}, []}, - {:add, :category_3, %Reference{table: :categories, on_delete: :delete_all}, [null: false]}, - {:add, :category_4, %Reference{table: :categories, on_delete: :nilify_all}, []}, - {:add, :category_5, %Reference{table: :categories, on_update: :nothing}, []}, - {:add, :category_6, %Reference{table: :categories, on_update: :update_all}, [null: false]}, - {:add, :category_7, %Reference{table: :categories, on_update: :nilify_all}, []}, - {:add, :category_8, %Reference{table: :categories, on_delete: :nilify_all, on_update: :update_all}, [null: false]}, - {:add, :category_9, %Reference{table: :categories, on_delete: :restrict}, []}, - {:add, :category_10, %Reference{table: :categories, on_update: :restrict}, []}, - {:add, :category_11, %Reference{table: :categories, prefix: "foo", on_update: :restrict}, []} - ]} - - assert execute_ddl(create) == [""" - CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, - "category_0" INTEGER CONSTRAINT "posts_category_0_fkey" REFERENCES "categories"("id"), - "category_1" INTEGER CONSTRAINT "foo_bar" REFERENCES "categories"("id"), - "category_2" INTEGER CONSTRAINT "posts_category_2_fkey" REFERENCES "categories"("id"), - "category_3" INTEGER NOT NULL CONSTRAINT "posts_category_3_fkey" REFERENCES "categories"("id") ON DELETE CASCADE, - "category_4" INTEGER CONSTRAINT "posts_category_4_fkey" REFERENCES "categories"("id") ON DELETE SET NULL, - "category_5" INTEGER CONSTRAINT "posts_category_5_fkey" REFERENCES "categories"("id"), - "category_6" INTEGER NOT NULL CONSTRAINT "posts_category_6_fkey" REFERENCES "categories"("id") ON UPDATE CASCADE, - "category_7" INTEGER CONSTRAINT "posts_category_7_fkey" REFERENCES "categories"("id") ON UPDATE SET NULL, - "category_8" INTEGER NOT NULL CONSTRAINT "posts_category_8_fkey" REFERENCES "categories"("id") ON DELETE SET NULL ON UPDATE CASCADE, - "category_9" INTEGER CONSTRAINT "posts_category_9_fkey" REFERENCES "categories"("id") ON DELETE RESTRICT, - "category_10" INTEGER CONSTRAINT "posts_category_10_fkey" REFERENCES "categories"("id") ON UPDATE RESTRICT, - "category_11" INTEGER CONSTRAINT "posts_category_11_fkey" REFERENCES "foo"."categories"("id") ON UPDATE RESTRICT) - """ |> remove_newlines] - end - - test "create table with options" do - create = {:create, table(:posts, [options: "WITH FOO=BAR"]), - [{:add, :id, :serial, [primary_key: true]}, - {:add, :created_at, :naive_datetime, []}]} - assert execute_ddl(create) == - [~s|CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "created_at" TEXT) WITH FOO=BAR|] - end - - test "create table with composite key" do - create = {:create, table(:posts), - [{:add, :a, :integer, [primary_key: true]}, - {:add, :b, :integer, [primary_key: true]}, - {:add, :name, :string, []}]} - - assert 
execute_ddl(create) == [""" - CREATE TABLE "posts" ("a" INTEGER, "b" INTEGER, "name" TEXT, PRIMARY KEY ("a","b")) - """ |> remove_newlines] - end - - test "create table with binary column and UTF-8 default" do - create = {:create, table(:blobs), - [{:add, :blob, :binary, [default: "foo"]}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "blobs" ("blob" BLOB DEFAULT 'foo') - """ |> remove_newlines] - end - - test "create table with binary column and hex blob literal default" do - create = {:create, table(:blobs), - [{:add, :blob, :binary, [default: "\\x666F6F"]}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "blobs" ("blob" BLOB DEFAULT '\\\\x666F6F') - """ |> remove_newlines] - end - - test "create table with binary column and hex blob literal null-byte" do - create = {:create, table(:blobs), - [{:add, :blob, :binary, [default: "\\\x00"]}]} - - assert execute_ddl(create) == [""" - CREATE TABLE "blobs" ("blob" BLOB DEFAULT '\\\\\x00') - """ |> remove_newlines] - end - - test "create table with a map column, and an empty map default" do - create = {:create, table(:posts), - [ - {:add, :a, :map, [default: %{}]} - ] - } - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT ('{}'))|] - end - - test "create table with a map column, and a map default with values" do - create = {:create, table(:posts), - [ - {:add, :a, :map, [default: %{foo: "bar", baz: "boom"}]} - ] - } - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT ('{"baz":"boom","foo":"bar"}'))|] - end - - test "create table with a map column, and a string default" do - create = {:create, table(:posts), - [ - {:add, :a, :map, [default: ~s|{"foo":"bar","baz":"boom"}|]} - ] - } - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("a" TEXT DEFAULT '{"foo":"bar","baz":"boom"}')|] - end - - test "create table with time columns" do - create = {:create, table(:posts), - [{:add, :published_at, :time, [precision: 3]}, - {:add, :submitted_at, :time, []}]} - - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] - end - - test "create table with time_usec columns" do - create = {:create, table(:posts), - [{:add, :published_at, :time_usec, [precision: 3]}, - {:add, :submitted_at, :time_usec, []}]} - - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] - end - - test "create table with utc_datetime columns" do - create = {:create, table(:posts), - [{:add, :published_at, :utc_datetime, [precision: 3]}, - {:add, :submitted_at, :utc_datetime, []}]} - - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] - end - - test "create table with utc_datetime_usec columns" do - create = {:create, table(:posts), - [{:add, :published_at, :utc_datetime_usec, [precision: 3]}, - {:add, :submitted_at, :utc_datetime_usec, []}]} - - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] - end - - test "create table with naive_datetime columns" do - create = {:create, table(:posts), - [{:add, :published_at, :naive_datetime, [precision: 3]}, - {:add, :submitted_at, :naive_datetime, []}]} - - assert execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] - end - - test "create table with naive_datetime_usec columns" do - create = {:create, table(:posts), - [{:add, :published_at, :naive_datetime_usec, [precision: 3]}, - {:add, :submitted_at, :naive_datetime_usec, []}]} - - assert 
execute_ddl(create) == [~s|CREATE TABLE "posts" ("published_at" TEXT, "submitted_at" TEXT)|] - end - - test "create table with an unsupported type" do - create = {:create, table(:posts), - [ - {:add, :a, {:a, :b, :c}, [default: %{}]} - ] - } - assert_raise ArgumentError, - "unsupported type `{:a, :b, :c}`. " <> - "The type can either be an atom, a string or a tuple of the form " <> - "`{:map, t}` or `{:array, t}` where `t` itself follows the same conditions.", - fn -> execute_ddl(create) end - end - - test "drop table" do - drop = {:drop, table(:posts)} - assert execute_ddl(drop) == [~s|DROP TABLE "posts"|] - end - - test "drop table with prefix" do - drop = {:drop, table(:posts, prefix: :foo)} - assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] - end - - test "drop table with cascade" do - drop = {:drop, table(:posts), :cascade} - - assert_raise ArgumentError, fn-> - execute_ddl(drop) - end - - assert_raise ArgumentError, fn-> - execute_ddl(drop) - end - end - - test "alter table" do - alter = {:alter, table(:posts), - [{:add, :title, :string, [default: "Untitled", size: 100, null: false]}, - {:add, :author_id, %Reference{table: :author}, []}, - {:add, :category_id, %Reference{table: :categories, validate: false}, []}, - {:remove, :summary}, - {:remove, :body, :text, []}, - {:remove, :space_id, %Reference{table: :author}, []}]} - - assert execute_ddl(alter) == [ - ~s|ALTER TABLE "posts" ADD COLUMN "title" TEXT DEFAULT 'Untitled' NOT NULL|, - ~s|ALTER TABLE "posts" ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES "author"("id")|, - ~s|ALTER TABLE "posts" ADD COLUMN "category_id" INTEGER CONSTRAINT "posts_category_id_fkey" REFERENCES "categories"("id")|, - ~s|ALTER TABLE "posts" DROP COLUMN "summary"|, - ~s|ALTER TABLE "posts" DROP COLUMN "body"|, - ~s|ALTER TABLE "posts" DROP COLUMN "space_id"| - ] - end - - test "alter table with prefix" do - alter = {:alter, table(:posts, prefix: :foo), - [{:add, :author_id, %Reference{table: :author}, []}]} - - assert execute_ddl(alter) == [""" - ALTER TABLE "foo"."posts" - ADD COLUMN "author_id" INTEGER CONSTRAINT "posts_author_id_fkey" REFERENCES "foo"."author"("id") - """ |> remove_newlines] - end - - test "alter table with serial primary key" do - alter = {:alter, table(:posts), - [{:add, :my_pk, :serial, [primary_key: true]}]} - - assert execute_ddl(alter) == [""" - ALTER TABLE "posts" - ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT - """ |> remove_newlines] - end - - test "alter table with bigserial primary key" do - alter = {:alter, table(:posts), - [{:add, :my_pk, :bigserial, [primary_key: true]}]} - - assert execute_ddl(alter) == [""" - ALTER TABLE "posts" - ADD COLUMN "my_pk" INTEGER PRIMARY KEY AUTOINCREMENT - """ |> remove_newlines] - end - - test "create index" do - create = {:create, index(:posts, [:category_id, :permalink])} - assert execute_ddl(create) == - [~s|CREATE INDEX "posts_category_id_permalink_index" ON "posts" ("category_id", "permalink")|] - - create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main")} - assert execute_ddl(create) == - [~s|CREATE INDEX "posts$main" ON "posts" (lower(permalink))|] - end - - test "create index with prefix" do - create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} - assert execute_ddl(create) == - [~s|CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink")|] - - create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main", prefix: :foo)} - assert 
execute_ddl(create) == - [~s|CREATE INDEX "posts$main" ON "foo"."posts" (lower(permalink))|] - end - - test "raise on create index with comment" do - create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")} - assert_raise ArgumentError, ~r/comment/, fn -> - execute_ddl(create) - end - end - - test "create index with comment" do - create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} - assert execute_ddl(create) == [remove_newlines(""" - CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink") - """)] - end - - test "create unique index" do - create = {:create, index(:posts, [:permalink], unique: true)} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink")|] - end - - test "create unique index with condition" do - create = {:create, index(:posts, [:permalink], unique: true, where: "public IS 1")} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public IS 1|] - - create = {:create, index(:posts, [:permalink], unique: true, where: :public)} - assert execute_ddl(create) == - [~s|CREATE UNIQUE INDEX "posts_permalink_index" ON "posts" ("permalink") WHERE public|] - end - - test "create index with include fields" do - create = {:create, index(:posts, [:permalink], unique: true, include: [:public])} - assert_raise ArgumentError, fn -> - execute_ddl(create) - end - end - - test "create unique index with nulls_distinct option" do - create = {:create, index(:posts, [:permalink], unique: true, nulls_distinct: true)} - assert_raise ArgumentError, fn -> - execute_ddl(create) - end - end - - test "create index concurrently not supported" do - index = index(:posts, [:permalink]) - create = {:create, %{index | concurrently: true}} - assert_raise ArgumentError, fn -> - execute_ddl(create) - end - end - - test "create an index using a different type" do - create = {:create, index(:posts, [:permalink], using: :hash)} - assert_raise ArgumentError, fn -> - execute_ddl(create) - end - end - - test "create an index without recursively creating indexes on partitions" do - create = {:create, index(:posts, [:permalink], only: true)} - assert_raise ArgumentError, fn -> - execute_ddl(create) - end - end - - test "drop index" do - drop = {:drop, index(:posts, [:id], name: "posts$main")} - assert execute_ddl(drop) == [~s|DROP INDEX "posts$main"|] - end - - test "drop index with prefix" do - drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo)} - assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] - end - - test "drop index mode not supported" do - assert_raise ArgumentError, fn -> - drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} - execute_ddl(drop) - end - end - - test "drop index concurrently not supported" do - index = index(:posts, [:id], name: "posts$main") - assert_raise ArgumentError, fn -> - drop = {:drop, %{index | concurrently: true}} - execute_ddl(drop) - end - end - - test "drop index with cascade" do - assert_raise ArgumentError, fn -> - drop = {:drop, index(:posts, [:id], name: "posts$main"), :cascade} - execute_ddl(drop) - end - - assert_raise ArgumentError, fn -> - drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :cascade} - execute_ddl(drop) - end - end - - test "drop constraint" do - assert_raise ArgumentError, ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, fn -> - execute_ddl({:drop, constraint(:products, 
"price_must_be_positive", prefix: :foo), :restrict}) - end - end - - test "drop_if_exists constraint" do - assert_raise ArgumentError, ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, fn -> - execute_ddl({:drop_if_exists, constraint(:products, "price_must_be_positive", prefix: :foo), :restrict}) - end - end - - test "rename table" do - rename = {:rename, table(:posts), table(:new_posts)} - assert execute_ddl(rename) == [~s|ALTER TABLE "posts" RENAME TO "new_posts"|] - end - - test "rename table with prefix" do - rename = {:rename, table(:posts, prefix: :foo), table(:new_posts, prefix: :foo)} - assert execute_ddl(rename) == [~s|ALTER TABLE "foo"."posts" RENAME TO "new_posts"|] - end - - test "rename column" do - rename = {:rename, table(:posts), :given_name, :first_name} - assert execute_ddl(rename) == [~s|ALTER TABLE "posts" RENAME COLUMN "given_name" TO "first_name"|] - end - - test "rename column in prefixed table" do - rename = {:rename, table(:posts, prefix: :foo), :given_name, :first_name} - assert execute_ddl(rename) == [~s|ALTER TABLE "foo"."posts" RENAME COLUMN "given_name" TO "first_name"|] - end - - test "autoincrement support" do - serial = {:create, table(:posts), [{:add, :id, :serial, [primary_key: true]}]} - bigserial = {:create, table(:posts), [{:add, :id, :bigserial, [primary_key: true]}]} - id = {:create, table(:posts), [{:add, :id, :id, [primary_key: true]}]} - integer = {:create, table(:posts), [{:add, :id, :integer, [primary_key: true]}]} - - assert execute_ddl(serial) == [ - ~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT)/ - ] - - assert execute_ddl(bigserial) == [ - ~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY AUTOINCREMENT)/ - ] - - assert execute_ddl(id) == [~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY)/] - assert execute_ddl(integer) == [~s/CREATE TABLE "posts" ("id" INTEGER PRIMARY KEY)/] - end - - defp remove_newlines(string) do - string |> String.trim |> String.replace("\n", " ") - end -end diff --git a/test/ecto/integration/crud_test.exs b/test/ecto/integration/crud_test.exs index 51267d9..50d7cba 100644 --- a/test/ecto/integration/crud_test.exs +++ b/test/ecto/integration/crud_test.exs @@ -219,7 +219,7 @@ defmodule Ecto.Integration.CrudTest do test "can handle in" do TestRepo.insert!(%Account{name: "hi"}) assert [] = TestRepo.all(from(a in Account, where: a.name in ["404"])) - assert [_] = TestRepo.all(from(a in Account, where: a.name in ["hi"])) + assert [_] = TestRepo.all(from(a in Account, where: a.name in [^"hi"])) end test "handles case sensitive text" do From 57b2d2687fac4fd9353306387eeb4fe3575dc140 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Tue, 24 Jan 2023 15:27:56 -0600 Subject: [PATCH 08/17] formatting --- lib/ecto/adapters/sqlite3.ex | 5 ++--- lib/ecto/adapters/sqlite3/connection.ex | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/ecto/adapters/sqlite3.ex b/lib/ecto/adapters/sqlite3.ex index 5b575e7..7045fc9 100644 --- a/lib/ecto/adapters/sqlite3.ex +++ b/lib/ecto/adapters/sqlite3.ex @@ -333,7 +333,7 @@ defmodule Ecto.Adapters.SQLite3 do @impl Ecto.Adapter def loaders(:map, type) do - [&Codec.json_decode/1, type] + [&Codec.json_decode/1, type] end @impl Ecto.Adapter @@ -440,7 +440,7 @@ defmodule Ecto.Adapters.SQLite3 do end @impl Ecto.Adapter - def dumpers({:array, _}, type) do + def dumpers({:array, _}, type) do [type, &Codec.json_encode/1] end @@ -453,7 +453,6 @@ defmodule Ecto.Adapters.SQLite3 do @impl Ecto.Adapter def dumpers(_, type), do: [type] - ## ## HELPERS ## diff 
--git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index a365a7aa..eddcb72 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -1320,15 +1320,15 @@ defmodule Ecto.Adapters.SQLite3.Connection do end # Super Hack to handle arrays in json - def expr({:in, a, [left,"["<> _ = right]}, sources, query) do + def expr({:in, a, [left, "[" <> _ = right]}, sources, query) do case Codec.json_decode(right) do {:ok, arr} -> expr({:in, a, [left, arr]}, sources, query) + _ -> raise Ecto.QueryError, - query: query, - message: "Malformed query on right hand side of #{right} in." - + query: query, + message: "Malformed query on right hand side of #{right} in." end end From 97add895fb930948e97870d1d133a3c5ced70e30 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 10:10:56 -0600 Subject: [PATCH 09/17] removing array ops --- lib/ecto/adapters/sqlite3.ex | 20 ++++--- lib/ecto/adapters/sqlite3/connection.ex | 54 +++++++++---------- .../ecto/adapters/sqlite3/connection_test.exs | 26 ++++++--- 3 files changed, 58 insertions(+), 42 deletions(-) diff --git a/lib/ecto/adapters/sqlite3.ex b/lib/ecto/adapters/sqlite3.ex index 7045fc9..5e80252 100644 --- a/lib/ecto/adapters/sqlite3.ex +++ b/lib/ecto/adapters/sqlite3.ex @@ -322,13 +322,13 @@ defmodule Ecto.Adapters.SQLite3 do end @impl Ecto.Adapter - def loaders({:array, _}, type) do - [&Codec.json_decode/1, type] + def loaders({:map, _}, type) do + [&Codec.json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] end @impl Ecto.Adapter - def loaders({:map, _}, type) do - [&Codec.json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] + def loaders({:array, _}, type) do + [&Codec.json_decode/1, type] end @impl Ecto.Adapter @@ -445,13 +445,19 @@ defmodule Ecto.Adapters.SQLite3 do end @impl Ecto.Adapter - def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}, &Codec.json_encode/1] + def dumpers({:map, _}, type) do + [&Ecto.Type.embedded_dump(type, &1, :json), &Codec.json_encode/1] + end @impl Ecto.Adapter - def dumpers({:map, _}, type), do: [type, &Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers(:map, type) do + [type, &Codec.json_encode/1] + end @impl Ecto.Adapter - def dumpers(_, type), do: [type] + def dumpers(_primitive, type) do + [type] + end ## ## HELPERS diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index eddcb72..9dd7767 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -1002,22 +1002,18 @@ defmodule Ecto.Adapters.SQLite3.Connection do ] end - defp update_op(:push, quoted_key, value, sources, query) do - [ - quoted_key, - " = JSON_INSERT(", - quoted_key, - ",'$[#]',", - expr(value, sources, query), - ?) - ] + defp update_op(:push, _quoted_key, _value, _sources, query) do + raise Ecto.QueryError, + query: query, + message: + "Arrays are not supported for SQLite3" end defp update_op(:pull, _quoted_key, _value, _sources, query) do raise Ecto.QueryError, query: query, message: - "pull is not supported for SQLite3, if you can figure out a way to do with JSON array's please pull request into ecto_sqlite3." 
+ "Arrays are not supported for SQLite3" end defp update_op(command, _quoted_key, _value, _sources, query) do @@ -1297,6 +1293,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do source end + def expr({:in, _, [_left, "[]"]}, _sources, _query) do + "0" + end + def expr({:in, _, [_left, []]}, _sources, _query) do "0" end @@ -1320,16 +1320,14 @@ defmodule Ecto.Adapters.SQLite3.Connection do end # Super Hack to handle arrays in json - def expr({:in, a, [left, "[" <> _ = right]}, sources, query) do - case Codec.json_decode(right) do - {:ok, arr} -> - expr({:in, a, [left, arr]}, sources, query) - - _ -> - raise Ecto.QueryError, - query: query, - message: "Malformed query on right hand side of #{right} in." - end + def expr({:in, _, [left, right]}, sources, query) do + [ + expr(left, sources, query), + " IN (SELECT value FROM JSON_EACH(", + expr(right, sources, query), + ?), + ?) + ] end def expr({:is_nil, _, [arg]}, sources, query) do @@ -1453,15 +1451,17 @@ defmodule Ecto.Adapters.SQLite3.Connection do end end - # Hack cause I can't get arrays to work - def expr("[" <> _ = list, _sources, _query) do - ["JSON_ARRAY('", list, "')"] + # TODO It technically is, its just a json array, so we *could* support it + def expr("[" <> _list, _sources, query) do + raise Ecto.QueryError, + query: query, + message: "Array literals are not supported by SQLite3" end - def expr(list, _sources, _query) when is_list(list) do - library = Application.get_env(:ecto_sqlite3, :json_library, Jason) - expression = IO.iodata_to_binary(library.encode_to_iodata!(list)) - ["JSON_ARRAY('", expression, "')"] + def expr(list, _sources, query) when is_list(list) do + raise Ecto.QueryError, + query: query, + message: "Array literals are not supported by SQLite3" end def expr(%Decimal{} = decimal, _sources, _query) do diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index 80236e6..26a6ba0 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -860,7 +860,11 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> select([e], e in [1, 2, 3]) |> plan() - assert all(query) == ~s{SELECT s0 IN (1,2,3) FROM "schema" AS s0} + assert_raise Ecto.QueryError, fn -> + all(query) + end + + #assert all(query) == ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} query = Schema |> select([e], 1 in [1, e.x, 3]) |> plan() assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} @@ -950,15 +954,20 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do test "arrays and sigils" do query = Schema |> select([], fragment("?", [1, 2, 3])) |> plan() - assert all(query) == ~s{SELECT JSON_ARRAY('[1,2,3]') FROM "schema" AS s0} + assert_raise Ecto.QueryError, fn -> + all(query) + end query = Schema |> select([], fragment("?", ~w(abc def))) |> plan() - assert all(query) == ~s{SELECT JSON_ARRAY('["abc","def"]') FROM "schema" AS s0} + assert_raise Ecto.QueryError, fn -> + all(query) + end query = Schema |> where([s], s.w == []) |> select([s], s.w) |> plan() - assert all(query) == - ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = JSON_ARRAY('[]'))} + assert_raise Ecto.QueryError, fn -> + all(query) + end end test "interpolated values" do @@ -1133,14 +1142,15 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do test "update all array ops" do query = from(m in Schema, update: [push: [w: 0]]) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "w" = JSON_INSERT("w",'$[#]',0)} 
+ assert_raise Ecto.QueryError, fn -> + update_all(query) + end query = from(m in Schema, update: [pull: [w: 0]]) |> plan(:update_all) - assert_raise Ecto.QueryError, fn -> update_all(query) end + end test "update all with subquery" do From 052ce4e6ceced665cc9bd64f814fb0c036fb450e Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 10:11:49 -0600 Subject: [PATCH 10/17] fixing left join --- test/ecto/adapters/sqlite3/connection_test.exs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index 26a6ba0..7e933d7 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -1191,7 +1191,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ~s{UPDATE "first"."schema" AS s0 SET "x" = 0} end - # TODO this is broken? test "update all with left join" do query = from(m in Schema, @@ -1201,8 +1200,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ) |> plan(:update_all) - assert update_all(query) == - ~s{UPDATE "schema" AS s0 SET "w" = s0."list2" FROM "schema2" AS s1 LEFT OUTER JOIN "schema3" AS s2 ON s2."id" = s0."y" WHERE (s1."z" = s0."x")} + assert update_all(query) == "UPDATE \"schema\" AS s0 SET \"w\" = s0.\"list2\" FROM \"schema2\" AS s1, \"schema3\" AS s2 WHERE (s1.\"z\" = s0.\"x\") AND (s2.\"id\" = s0.\"y\")" end test "delete all" do From ed3d69620bdc6d18179e7add4e78eec812b0753d Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 10:12:55 -0600 Subject: [PATCH 11/17] fixing broken test --- test/ecto/integration/crud_test.exs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ecto/integration/crud_test.exs b/test/ecto/integration/crud_test.exs index 50d7cba..e09dd99 100644 --- a/test/ecto/integration/crud_test.exs +++ b/test/ecto/integration/crud_test.exs @@ -218,7 +218,7 @@ defmodule Ecto.Integration.CrudTest do describe "select" do test "can handle in" do TestRepo.insert!(%Account{name: "hi"}) - assert [] = TestRepo.all(from(a in Account, where: a.name in ["404"])) + assert [] = TestRepo.all(from(a in Account, where: a.name in [^"404"])) assert [_] = TestRepo.all(from(a in Account, where: a.name in [^"hi"])) end From db5e2cd5d2f5ab4144aacfa8b6490961aa9706e1 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 10:13:18 -0600 Subject: [PATCH 12/17] fixing formatting --- lib/ecto/adapters/sqlite3/connection.ex | 6 ++---- test/ecto/adapters/sqlite3/connection_test.exs | 9 ++++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index 9dd7767..e901c9a 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -1005,15 +1005,13 @@ defmodule Ecto.Adapters.SQLite3.Connection do defp update_op(:push, _quoted_key, _value, _sources, query) do raise Ecto.QueryError, query: query, - message: - "Arrays are not supported for SQLite3" + message: "Arrays are not supported for SQLite3" end defp update_op(:pull, _quoted_key, _value, _sources, query) do raise Ecto.QueryError, query: query, - message: - "Arrays are not supported for SQLite3" + message: "Arrays are not supported for SQLite3" end defp update_op(command, _quoted_key, _value, _sources, query) do diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index 7e933d7..cef134a 100644 --- 
a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -864,7 +864,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do all(query) end - #assert all(query) == ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} + # assert all(query) == ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} query = Schema |> select([e], 1 in [1, e.x, 3]) |> plan() assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} @@ -954,11 +954,13 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do test "arrays and sigils" do query = Schema |> select([], fragment("?", [1, 2, 3])) |> plan() + assert_raise Ecto.QueryError, fn -> all(query) end query = Schema |> select([], fragment("?", ~w(abc def))) |> plan() + assert_raise Ecto.QueryError, fn -> all(query) end @@ -1147,10 +1149,10 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end query = from(m in Schema, update: [pull: [w: 0]]) |> plan(:update_all) + assert_raise Ecto.QueryError, fn -> update_all(query) end - end test "update all with subquery" do @@ -1200,7 +1202,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ) |> plan(:update_all) - assert update_all(query) == "UPDATE \"schema\" AS s0 SET \"w\" = s0.\"list2\" FROM \"schema2\" AS s1, \"schema3\" AS s2 WHERE (s1.\"z\" = s0.\"x\") AND (s2.\"id\" = s0.\"y\")" + assert update_all(query) == + "UPDATE \"schema\" AS s0 SET \"w\" = s0.\"list2\" FROM \"schema2\" AS s1, \"schema3\" AS s2 WHERE (s1.\"z\" = s0.\"x\") AND (s2.\"id\" = s0.\"y\")" end test "delete all" do From bc94ac7b5fa1cc4ae2beacfa1911075ccfe97b2b Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 10:58:22 -0600 Subject: [PATCH 13/17] bleh --- lib/ecto/adapters/sqlite3/connection.ex | 42 ++++------ .../ecto/adapters/sqlite3/connection_test.exs | 79 +++++-------------- 2 files changed, 33 insertions(+), 88 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index e901c9a..dbedd4f 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -376,11 +376,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do raise ArgumentError, "SQLite3 adapter does not support keyword lists in :options" end - @impl true - def execute_ddl({_command, %Table{comment: comment}, _}) when not is_nil(comment) do - raise ArgumentError, "SQLite3 adapter does not support comments" - end - @impl true def execute_ddl({:create, %Table{} = table, columns}) do {table, composite_pk_def} = composite_pk_definition(table, columns) @@ -470,11 +465,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do end) end - @impl true - def execute_ddl({_, %Index{comment: c}}) when not is_nil(c) do - raise ArgumentError, "comment is not supported with SQLite3" - end - @impl true def execute_ddl({_, %Index{concurrently: true}}) do raise ArgumentError, "`concurrently` is not supported with SQLite3" @@ -1187,10 +1177,10 @@ defmodule Ecto.Adapters.SQLite3.Connection do Enum.map(combinations, &combination/1) end - defp combination({:union, query}), do: [" UNION (", all(query), ?)] - defp combination({:union_all, query}), do: [" UNION ALL (", all(query), ?)] - defp combination({:except, query}), do: [" EXCEPT (", all(query), ?)] - defp combination({:intersect, query}), do: [" INTERSECT (", all(query), ?)] + defp combination({:union, query}), do: [" UNION ", all(query)] + defp combination({:union_all, query}), do: [" UNION ALL ", all(query)] + defp combination({:except, query}), do: [" 
EXCEPT ", all(query)] + defp combination({:intersect, query}), do: [" INTERSECT ", all(query)] defp combination({:except_all, query}) do raise Ecto.QueryError, @@ -1372,21 +1362,25 @@ defmodule Ecto.Adapters.SQLite3.Connection do def expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do [ - "datetime(", + "CAST (", + "strftime('%Y-%m-%d %H:%M:%f000Z'", + ",", expr(datetime, sources, query), ",", interval(count, interval, sources), - ")" + ") AS TEXT)" ] end def expr({:date_add, _, [date, count, interval]}, sources, query) do [ - "date(", + "CAST (", + "strftime('%Y-%m-%d'", + ",", expr(date, sources, query), ",", interval(count, interval, sources), - ")" + ") AS TEXT)" ] end @@ -1450,12 +1444,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do end # TODO It technically is, its just a json array, so we *could* support it - def expr("[" <> _list, _sources, query) do - raise Ecto.QueryError, - query: query, - message: "Array literals are not supported by SQLite3" - end - def expr(list, _sources, query) when is_list(list) do raise Ecto.QueryError, query: query, @@ -1509,15 +1497,15 @@ defmodule Ecto.Adapters.SQLite3.Connection do end def interval(count, "millisecond", sources) do - "('#{expr(count, sources, nil)} / 1000.0) seconds'" + "(#{expr(count, sources, nil)} / 1000.0) || ' seconds'" end def interval(count, "week", sources) do - "('#{expr(count, sources, nil)} * 7) days'" + "(#{expr(count, sources, nil)} * 7) || ' days'" end def interval(count, interval, sources) do - "'#{expr(count, sources, nil)} #{interval}'" + "#{expr(count, sources, nil)} || ' #{interval}'" end defp op_to_binary({op, _, [_, _]} = expression, sources, query) diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index cef134a..1479d29 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -169,8 +169,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ~s{WITH RECURSIVE "tree" AS } <> ~s{(SELECT sc0."id" AS "id", 1 AS "depth" FROM "categories" AS sc0 WHERE (sc0."parent_id" IS NULL) } <> ~s{UNION ALL } <> - ~s{(SELECT c0."id", t1."depth" + 1 FROM "categories" AS c0 } <> - ~s{INNER JOIN "tree" AS t1 ON t1."id" = c0."parent_id")) } <> + ~s{SELECT c0."id", t1."depth" + 1 FROM "categories" AS c0 } <> + ~s{INNER JOIN "tree" AS t1 ON t1."id" = c0."parent_id") } <> ~s{SELECT s0."x", t1."id", CAST(t1."depth" AS INTEGER) } <> ~s{FROM "schema" AS s0 } <> ~s{INNER JOIN "tree" AS t1 ON t1."id" = s0."category_id"} @@ -212,9 +212,9 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ~s{FROM "posts" AS p0 } <> ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = p0."guid" } <> ~s{UNION ALL } <> - ~s{(SELECT v0."title", c1."text" } <> + ~s{SELECT v0."title", c1."text" } <> ~s{FROM "videos" AS v0 } <> - ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = v0."guid")} + ~s{INNER JOIN "comments_scope" AS c1 ON c1."entity_id" = v0."guid"} end test "fragment CTE" do @@ -478,16 +478,16 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{UNION (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{UNION (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{UNION SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 } <> + ~s{UNION SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 } <> ~s{ORDER BY rand LIMIT 5 OFFSET 10} query = base_query |> 
union_all(^union_query1) |> union_all(^union_query2) |> plan() assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{UNION ALL (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{UNION ALL (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{UNION ALL SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 } <> + ~s{UNION ALL SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 } <> ~s{ORDER BY rand LIMIT 5 OFFSET 10} end @@ -505,8 +505,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{EXCEPT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{EXCEPT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{EXCEPT SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 } <> + ~s{EXCEPT SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 } <> ~s{ORDER BY rand LIMIT 5 OFFSET 10} query = @@ -535,8 +535,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 } <> - ~s{INTERSECT (SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20) } <> - ~s{INTERSECT (SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30) } <> + ~s{INTERSECT SELECT s0."y" FROM "schema" AS s0 ORDER BY s0."y" LIMIT 40 OFFSET 20 } <> + ~s{INTERSECT SELECT s0."z" FROM "schema" AS s0 ORDER BY s0."z" LIMIT 60 OFFSET 30 } <> ~s{ORDER BY rand LIMIT 5 OFFSET 10} query = @@ -719,7 +719,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(s0."foo",'1 month') > s0."bar")} + ~s{SELECT 1 FROM "schema" AS s0 WHERE (CAST (strftime('%Y-%m-%d %H:%M:%f000Z',s0.\"foo\",1 || ' month') AS TEXT) > s0."bar")} query = "schema" @@ -728,7 +728,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 WHERE (datetime(CAST(s0."foo" AS TEXT),'1 month') > s0."bar")} + ~s{SELECT 1 FROM "schema" AS s0 WHERE (CAST (strftime('%Y-%m-%d %H:%M:%f000Z',CAST(s0.\"foo\" AS TEXT),1 || ' month') AS TEXT) > s0."bar")} end test "tagged type" do @@ -860,11 +860,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> select([e], e in [1, 2, 3]) |> plan() - assert_raise Ecto.QueryError, fn -> - all(query) - end - - # assert all(query) == ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} + assert all(query) == ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} query = Schema |> select([e], 1 in [1, e.x, 3]) |> plan() assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} @@ -966,10 +962,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end query = Schema |> where([s], s.w == []) |> select([s], s.w) |> plan() - - assert_raise Ecto.QueryError, fn -> - all(query) - end + assert all(query) == ~s{SELECT s0."w" FROM "schema" AS s0 WHERE (s0."w" = '[]')} end test "interpolated values" do @@ -1005,8 +998,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do "SELECT s0.\"id\", ? FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON ? " <> "INNER JOIN \"schema2\" AS s2 ON ? WHERE (?) AND (?) " <> "GROUP BY ?, ? HAVING (?) AND (?) " <> - "UNION (SELECT s0.\"id\", ? FROM \"schema1\" AS s0 WHERE (?)) " <> - "UNION ALL (SELECT s0.\"id\", ? FROM \"schema2\" AS s0 WHERE (?)) " <> + "UNION SELECT s0.\"id\", ? 
FROM \"schema1\" AS s0 WHERE (?) " <> + "UNION ALL SELECT s0.\"id\", ? FROM \"schema2\" AS s0 WHERE (?) " <> "ORDER BY ? LIMIT ? OFFSET ?" assert all(query) == String.trim(result) @@ -1882,22 +1875,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do ] end - test "raise on table comment" do - create = - {:create, table(:posts, comment: "comment"), - [ - {:add, :category_0, %Reference{table: :categories}, - [comment: "column comment"]}, - {:add, :created_at, :timestamp, []}, - {:add, :updated_at, :timestamp, [comment: "column comment 2"]} - ]} - - assert_raise ArgumentError, ~r/comment/, fn -> - execute_ddl(create) - end - end - - # TODO should we raise on comment? test "create table with comment on columns" do create = {:create, table(:posts), @@ -2262,26 +2239,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do [~s|CREATE INDEX "posts$main" ON "foo"."posts" (lower(permalink))|] end - test "raise on create index with comment" do - create = - {:create, - index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")} - - assert_raise ArgumentError, ~r/comment/, fn -> - execute_ddl(create) - end - end - - test "create index with comment" do - create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} - - assert execute_ddl(create) == [ - remove_newlines(""" - CREATE INDEX "posts_category_id_permalink_index" ON "foo"."posts" ("category_id", "permalink") - """) - ] - end - test "create unique index" do create = {:create, index(:posts, [:permalink], unique: true)} From a66cbea94d892e94a81d7d1cc6bc2568e6f37b6a Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 11:00:10 -0600 Subject: [PATCH 14/17] warnings --- lib/ecto/adapters/sqlite3/connection.ex | 1 - test/ecto/adapters/sqlite3/connection_test.exs | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index dbedd4f..4869b06 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -14,7 +14,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do alias Ecto.Query.WithExpr import Ecto.Adapters.SQLite3.DataType - alias Ecto.Adapters.SQLite3.Codec @parent_as __MODULE__ diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index 1479d29..e615876 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -719,7 +719,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 WHERE (CAST (strftime('%Y-%m-%d %H:%M:%f000Z',s0.\"foo\",1 || ' month') AS TEXT) > s0."bar")} + ~s{SELECT 1 FROM "schema" AS s0 WHERE (CAST (strftime('%Y-%m-%d %H:%M:%f000Z',s0.\"foo\",1 || ' month') AS TEXT) > s0."bar")} query = "schema" @@ -728,7 +728,7 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> plan() assert all(query) == - ~s{SELECT 1 FROM "schema" AS s0 WHERE (CAST (strftime('%Y-%m-%d %H:%M:%f000Z',CAST(s0.\"foo\" AS TEXT),1 || ' month') AS TEXT) > s0."bar")} + ~s{SELECT 1 FROM "schema" AS s0 WHERE (CAST (strftime('%Y-%m-%d %H:%M:%f000Z',CAST(s0.\"foo\" AS TEXT),1 || ' month') AS TEXT) > s0."bar")} end test "tagged type" do @@ -860,7 +860,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> select([e], e in [1, 2, 3]) |> plan() - assert all(query) == ~s{SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM "schema" AS s0} + assert all(query) == + ~s{SELECT s0 IN (SELECT value FROM 
JSON_EACH('[1,2,3]')) FROM "schema" AS s0} query = Schema |> select([e], 1 in [1, e.x, 3]) |> plan() assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} From eba8cbf01218be1d7157625dcc7cd7a529fd9afc Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 11:51:37 -0600 Subject: [PATCH 15/17] remove restrict/cascade errors --- integration_test/hints_test.exs | 15 --------- integration_test/test_helper.exs | 4 +++ lib/ecto/adapters/sqlite3/connection.ex | 32 ++----------------- .../ecto/adapters/sqlite3/connection_test.exs | 31 ------------------ 4 files changed, 7 insertions(+), 75 deletions(-) diff --git a/integration_test/hints_test.exs b/integration_test/hints_test.exs index 935e2fb..e56239b 100644 --- a/integration_test/hints_test.exs +++ b/integration_test/hints_test.exs @@ -6,21 +6,6 @@ defmodule Ecto.Integration.HintsTest do alias Ecto.Integration.Post alias Ecto.Integration.TestRepo - test "join hints" do - {:ok, _} = TestRepo.query("CREATE INDEX post_id_idx ON posts (id)") - TestRepo.insert!(%Post{id: 1}) - - results = - from(p in Post, - join: p2 in Post, - on: p.id == p2.id, - hints: ["INDEXED BY post_id_idx"] - ) - |> TestRepo.all() - - assert [%Post{id: 1}] = results - end - test "from hints" do {:ok, _} = TestRepo.query("CREATE INDEX post_id_idx ON posts (id)") TestRepo.insert!(%Post{id: 1}) diff --git a/integration_test/test_helper.exs b/integration_test/test_helper.exs index ca3dbf0..c622829 100644 --- a/integration_test/test_helper.exs +++ b/integration_test/test_helper.exs @@ -108,6 +108,7 @@ ExUnit.start( :alter_foreign_key, :assigns_id_type, :modify_column, + :restrict, # SQLite3 does not support the concat function :concat, @@ -121,5 +122,8 @@ ExUnit.start( :selected_as_with_order_by, :selected_as_with_order_by_expression, :selected_as_with_having, + + # Distinct with options not supported + :distinct_count ] ) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index 4869b06..3170088 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -370,11 +370,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do ## DDL ## - @impl true - def execute_ddl({_command, %Table{options: options}, _}) when is_list(options) do - raise ArgumentError, "SQLite3 adapter does not support keyword lists in :options" - end - @impl true def execute_ddl({:create, %Table{} = table, columns}) do {table, composite_pk_def} = composite_pk_definition(table, columns) @@ -426,14 +421,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do end @impl true - def execute_ddl({:drop, %Table{} = table, mode}) do - if mode != [] do - raise ArgumentError, """ - `#{inspect(mode)}` is not supported for DROP TABLE with SQLite3 \ - DROP TABLE #{table.name} cannot have options set. - """ - end - + def execute_ddl({:drop, %Table{} = table, _mode}) do execute_ddl({:drop, table}) end @@ -540,14 +528,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do end @impl true - def execute_ddl({:drop, %Index{} = index, mode}) do - if mode != [] do - raise ArgumentError, """ - `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ - DROP INDEX #{index.name} cannot have options set. 
- """ - end - + def execute_ddl({:drop, %Index{} = index, _mode}) do execute_ddl({:drop, index}) end @@ -567,14 +548,7 @@ defmodule Ecto.Adapters.SQLite3.Connection do end @impl true - def execute_ddl({:drop_if_exists, %Index{} = index, mode}) do - if mode != [] do - raise ArgumentError, """ - `#{inspect(mode)}` is not supported for DROP INDEX with SQLite3 \ - DROP INDEX #{index.name} cannot have options set. - """ - end - + def execute_ddl({:drop_if_exists, %Index{} = index, _mode}) do execute_ddl({:drop_if_exists, index}) end diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index e615876..81b56e9 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -2139,18 +2139,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert execute_ddl(drop) == [~s|DROP TABLE "foo"."posts"|] end - test "drop table with cascade" do - drop = {:drop, table(:posts), :cascade} - - assert_raise ArgumentError, fn -> - execute_ddl(drop) - end - - assert_raise ArgumentError, fn -> - execute_ddl(drop) - end - end - test "alter table" do alter = {:alter, table(:posts), @@ -2314,13 +2302,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do assert execute_ddl(drop) == [~s|DROP INDEX "foo"."posts$main"|] end - test "drop index mode not supported" do - assert_raise ArgumentError, fn -> - drop = {:drop, index(:posts, [:id], name: "posts$main"), :restrict} - execute_ddl(drop) - end - end - test "drop index concurrently not supported" do index = index(:posts, [:id], name: "posts$main") @@ -2330,18 +2311,6 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do end end - test "drop index with cascade" do - assert_raise ArgumentError, fn -> - drop = {:drop, index(:posts, [:id], name: "posts$main"), :cascade} - execute_ddl(drop) - end - - assert_raise ArgumentError, fn -> - drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo), :cascade} - execute_ddl(drop) - end - end - test "drop constraint" do assert_raise ArgumentError, ~r/SQLite3 does not support ALTER TABLE DROP CONSTRAINT./, From 1cc23067df4b35423e8aa763bf8b8c18a8c5221d Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 25 Jan 2023 11:57:31 -0600 Subject: [PATCH 16/17] LINTERS --- .../ecto/adapters/sqlite3/connection_test.exs | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/test/ecto/adapters/sqlite3/connection_test.exs b/test/ecto/adapters/sqlite3/connection_test.exs index 81b56e9..3f7726b 100644 --- a/test/ecto/adapters/sqlite3/connection_test.exs +++ b/test/ecto/adapters/sqlite3/connection_test.exs @@ -3,8 +3,8 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do import Ecto.Query - alias Ecto.Queryable alias Ecto.Adapters.SQLite3.Connection, as: SQL + alias Ecto.Queryable defmodule Schema do use Ecto.Schema @@ -993,15 +993,16 @@ defmodule Ecto.Adapters.SQLite3.ConnectionTest do |> offset([], ^9) |> plan() - result = - "WITH \"cte1\" AS (SELECT ss0.\"id\" AS \"id\", ? AS \"smth\" FROM \"schema1\" AS ss0 WHERE (?)), " <> - "\"cte2\" AS (SELECT * FROM schema WHERE ?) " <> - "SELECT s0.\"id\", ? FROM \"schema\" AS s0 INNER JOIN \"schema2\" AS s1 ON ? " <> - "INNER JOIN \"schema2\" AS s2 ON ? WHERE (?) AND (?) " <> - "GROUP BY ?, ? HAVING (?) AND (?) " <> - "UNION SELECT s0.\"id\", ? FROM \"schema1\" AS s0 WHERE (?) " <> - "UNION ALL SELECT s0.\"id\", ? FROM \"schema2\" AS s0 WHERE (?) " <> - "ORDER BY ? LIMIT ? OFFSET ?" + result = """ + WITH "cte1" AS (SELECT ss0."id" AS "id", ? 
AS "smth" FROM "schema1" AS ss0 WHERE (?)), \ + "cte2" AS (SELECT * FROM schema WHERE ?) \ + SELECT s0."id", ? FROM "schema" AS s0 INNER JOIN "schema2" AS s1 ON ? \ + INNER JOIN "schema2" AS s2 ON ? WHERE (?) AND (?) \ + GROUP BY ?, ? HAVING (?) AND (?) \ + UNION SELECT s0."id", ? FROM "schema1" AS s0 WHERE (?) \ + UNION ALL SELECT s0."id", ? FROM "schema2" AS s0 WHERE (?) \ + ORDER BY ? LIMIT ? OFFSET ?\ + """ assert all(query) == String.trim(result) end From f21410ef0c051f8668a6f9a07d8638dd5870fb26 Mon Sep 17 00:00:00 2001 From: Jason Stiebs Date: Wed, 1 Mar 2023 10:55:11 -0600 Subject: [PATCH 17/17] pr feedback --- lib/ecto/adapters/sqlite3/connection.ex | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ecto/adapters/sqlite3/connection.ex b/lib/ecto/adapters/sqlite3/connection.ex index 9f46cb7..0e3cf92 100644 --- a/lib/ecto/adapters/sqlite3/connection.ex +++ b/lib/ecto/adapters/sqlite3/connection.ex @@ -1003,7 +1003,6 @@ defmodule Ecto.Adapters.SQLite3.Connection do join, " AS ", name, - Enum.map(hints, &[?\s | &1]), join_on(qual, expression, sources, query) ] end)