diff --git a/lib/ecto/adapters/exqlite.ex b/lib/ecto/adapters/exqlite.ex
new file mode 100644
index 00000000..ff3835a7
--- /dev/null
+++ b/lib/ecto/adapters/exqlite.ex
@@ -0,0 +1,140 @@
+defmodule Ecto.Adapters.Exqlite do
+  use Ecto.Adapters.SQL,
+    driver: :exqlite
+
+  import String, only: [to_integer: 1]
+
+  @behaviour Ecto.Adapter.Storage
+  @behaviour Ecto.Adapter.Structure
+
+  @impl true
+  def storage_down(options) do
+    db_path = Keyword.fetch!(options, :database)
+
+    with :ok <- File.rm(db_path) do
+      File.rm(db_path <> "-shm")
+      File.rm(db_path <> "-wal")
+      :ok
+    else
+      _ -> {:error, :already_down}
+    end
+  end
+
+  @impl true
+  def storage_status(options) do
+    db_path = Keyword.fetch!(options, :database)
+
+    if File.exists?(db_path) do
+      :up
+    else
+      :down
+    end
+  end
+
+  @impl true
+  def storage_up(options) do
+    db_path = Keyword.fetch!(options, :database)
+
+    if File.exists?(db_path) do
+      {:error, :already_up}
+    else
+      db_path |> Path.dirname() |> File.mkdir_p!()
+      {:ok, db} = Exqlite.Sqlite3.open(db_path)
+      :ok = Exqlite.Sqlite3.close(db)
+    end
+  end
+
+  @impl true
+  def supports_ddl_transaction?(), do: true
+
+  @impl true
+  def structure_dump(_default, _config) do
+    # table = config[:migration_source] || "schema_migrations"
+    # path = config[:dump_path] || Path.join(default, "structure.sql")
+    #
+    # TODO: dump the database and select the migration versions
+    #
+    # with {:ok, versions} <- select_versions(table, config),
+    #      {:ok, contents} <- dump(config),
+    #      {:ok, contents} <- append_versions(table, versions, contents) do
+    #   File.mkdir_p!(Path.dirname(path))
+    #   File.write!(path, contents)
+    #   {:ok, path}
+    # end
+    {:error, :not_implemented}
+  end
+
+  @impl true
+  def structure_load(_default, _config) do
+    # load the structure.sql file
+    {:error, :not_implemented}
+  end
+
+  @impl true
+  def loaders(:boolean, type), do: [&bool_decode/1, type]
+  def loaders(:binary_id, type), do: [Ecto.UUID, type]
+  def loaders(:utc_datetime, type), do: [&date_decode/1, type]
+  def loaders(:naive_datetime, type), do: [&date_decode/1, type]
+
+  def loaders({:embed, _} = type, _),
+    do: [&json_decode/1, &Ecto.Adapters.SQL.load_embed(type, &1)]
+
+  def loaders(:map, type), do: [&json_decode/1, type]
+  def loaders({:map, _}, type), do: [&json_decode/1, type]
+  def loaders({:array, _}, type), do: [&json_decode/1, type]
+  def loaders(:float, type), do: [&float_decode/1, type]
+
+  def loaders(_primitive, type) do
+    [type]
+  end
+
+  defp bool_decode(0), do: {:ok, false}
+  defp bool_decode(1), do: {:ok, true}
+  defp bool_decode(x), do: {:ok, x}
+
+  defp date_decode(
+         <<year::binary-size(4), "-", month::binary-size(2), "-",
+           day::binary-size(2)>>
+       ) do
+    {:ok, {to_integer(year), to_integer(month), to_integer(day)}}
+  end
+
+  defp date_decode(
+         <<year::binary-size(4), "-", month::binary-size(2), "-",
+           day::binary-size(2), " ", hour::binary-size(2), ":",
+           minute::binary-size(2), ":", second::binary-size(2), ".",
+           microsecond::binary-size(6)>>
+       ) do
+    {:ok,
+     {{to_integer(year), to_integer(month), to_integer(day)},
+      {to_integer(hour), to_integer(minute), to_integer(second),
+       to_integer(microsecond)}}}
+  end
+
+  defp date_decode(x), do: {:ok, x}
+
+  defp json_decode(x) when is_binary(x),
+    do: {:ok, Application.get_env(:ecto, :json_library).decode!(x)}
+
+  defp json_decode(x),
+    do: {:ok, x}
+
+  defp float_decode(x) when is_integer(x), do: {:ok, x / 1}
+  defp float_decode(x), do: {:ok, x}
+
+  @impl true
+  def dumpers(:binary, type), do: [type, &blob_encode/1]
+  def dumpers(:binary_id, type), do: [type, Ecto.UUID]
+  def dumpers(:boolean, type), do: [type, &bool_encode/1]
+  def dumpers({:embed, _} = type, _), do: [&Ecto.Adapters.SQL.dump_embed(type, &1)]
+  def dumpers(:time, type), do: [type, &time_encode/1]
+  def dumpers(:naive_datetime, type), do: [type, &naive_datetime_encode/1]
+  def dumpers(_primitive, type), do: [type]
+
+  defp blob_encode(value), do: {:ok, {:blob, value}}
+
+  defp bool_encode(false), do: {:ok, 0}
+  defp bool_encode(true), do: {:ok, 1}
+
+  defp time_encode(value) do
+    {:ok, value}
+  end
+
+  defp naive_datetime_encode(value) do
+    {:ok, NaiveDateTime.to_iso8601(value)}
+  end
+end
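This module is the Ecto-facing half of the driver: `storage_up/1` and friends manage the database file, while `loaders/2` and `dumpers/2` translate between SQLite's storage classes (0/1 integers for booleans, TEXT for dates) and Ecto types. A minimal usage sketch — the `MyApp` names and the path are invented for illustration, not part of this patch:

```elixir
# config/config.exs — :database is the file path that storage_up/1 creates.
config :my_app, MyApp.Repo, database: "priv/data/my_app.db"

# lib/my_app/repo.ex
defmodule MyApp.Repo do
  use Ecto.Repo,
    otp_app: :my_app,
    adapter: Ecto.Adapters.Exqlite
end
```
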
diff --git a/lib/ecto/adapters/exqlite/connection.ex b/lib/ecto/adapters/exqlite/connection.ex
new file mode 100644
index 00000000..e40dc520
--- /dev/null
+++ b/lib/ecto/adapters/exqlite/connection.ex
@@ -0,0 +1,1439 @@
+defmodule Ecto.Adapters.Exqlite.Connection do
+  @behaviour Ecto.Adapters.SQL.Connection
+
+  @parent_as __MODULE__
+  @drops [:drop, :drop_if_exists]
+
+  alias Ecto.Migration.Constraint
+  alias Ecto.Migration.Index
+  alias Ecto.Migration.Reference
+  alias Ecto.Migration.Table
+  alias Ecto.Query.BooleanExpr
+  alias Ecto.Query.JoinExpr
+  alias Ecto.Query.QueryExpr
+  alias Ecto.Query.WithExpr
+
+  @impl true
+  def child_spec(opts) do
+    {:ok, _} = Application.ensure_all_started(:db_connection)
+    DBConnection.child_spec(Exqlite.Connection, opts)
+  end
+
+  @impl true
+  def prepare_execute(conn, name, sql, params, opts) do
+    query = %Exqlite.Query{name: name, statement: sql}
+
+    case DBConnection.prepare_execute(conn, query, map_params(params), opts) do
+      {:ok, _, _} = ok ->
+        ok
+
+      {:error, %Exqlite.Error{}} = error ->
+        error
+
+      {:error, err} ->
+        raise err
+    end
+  end
+
+  @impl true
+  def execute(conn, sql, params, opts) when is_binary(sql) or is_list(sql) do
+    query = %Exqlite.Query{name: "", statement: IO.iodata_to_binary(sql)}
+
+    case DBConnection.prepare_execute(conn, query, map_params(params), opts) do
+      {:ok, %Exqlite.Query{}, result} ->
+        {:ok, result}
+
+      {:error, %Exqlite.Error{}} = error ->
+        error
+
+      {:error, err} ->
+        raise err
+    end
+  end
+
+  # Cached (already prepared) queries go straight to DBConnection.execute/4.
+  # An %ArgumentError{} here means the cached statement went stale, so we
+  # ask the pool to reset it.
+  def execute(conn, query, params, opts) do
+    case DBConnection.execute(conn, query, map_params(params), opts) do
+      {:ok, _} = ok ->
+        ok
+
+      {:error, %ArgumentError{} = err} ->
+        {:reset, err}
+
+      {:error, %Exqlite.Error{}} = error ->
+        error
+
+      {:error, err} ->
+        raise err
+    end
+  end
+
+  @impl true
+  def query(conn, sql, params, opts) do
+    query = %Exqlite.Query{statement: sql}
+    DBConnection.execute(conn, query, params, opts)
+  end
+
+  @impl true
+  def stream(conn, sql, params, opts) do
+    %Exqlite.Stream{conn: conn, query: sql, params: params, options: opts}
+  end
+
+  @impl true
+  def to_constraints(_, _), do: []
+
+  @impl true
+  def all(query, as_prefix \\ [])
+
+  def all(%Ecto.Query{lock: lock}, _as_prefix) when lock != nil do
+    raise ArgumentError, "locks are not supported by SQLite"
+  end
+
+  def all(query, as_prefix) do
+    sources = create_names(query, as_prefix)
+
+    cte = cte(query, sources)
+    from = from(query, sources)
+    select = select(query, sources)
+    join = join(query, sources)
+    where = where(query, sources)
+    group_by = group_by(query, sources)
+    having = having(query, sources)
+    window = window(query, sources)
+    combinations = combinations(query)
+    order_by = order_by(query, sources)
+    limit = limit(query, sources)
+    offset = offset(query, sources)
+
+    [
+      cte,
+      select,
+      from,
+      join,
+      where,
+      group_by,
+      having,
+      window,
+      combinations,
+      order_by,
+      limit,
+      offset
+    ]
+  end
+
+  @impl true
+  def update_all(query, prefix \\ nil)
+
+  def update_all(%Ecto.Query{joins: [_ | _]}, _prefix) do
+    # TODO: It is supported but not in the traditional sense
+    raise ArgumentError, "JOINS are not supported on UPDATE statements by SQLite"
+  end
+
+  def update_all(query, prefix) do
+    %{from: %{source: source}, select: select} = query
+
+    if select do
+      raise ArgumentError, ":select is not supported in update_all by SQLite3"
+    end
+
+    sources = create_names(query, [])
+    cte = cte(query, sources)
+    {from, name} = get_source(query, sources, 0, source)
+
+    fields =
+      if prefix do
+        update_fields(:on_conflict, query, sources)
+      else
+        update_fields(:update, query, sources)
+      end
+
+    {join, wheres} = using_join(query, :update_all, sources)
+    prefix = prefix || ["UPDATE ", from, " AS ", name, join, " SET "]
+    where = where(%{query | wheres: wheres ++ query.wheres}, sources)
+
+    [cte, prefix, fields | where]
+  end
+
+  @impl true
+  def delete_all(%Ecto.Query{joins: [_ | _]}) do
+    # TODO: It is supported but not in the traditional sense
+    raise ArgumentError, "JOINS are not supported on DELETE statements by SQLite"
+  end
+
+  def delete_all(query) do
+    if query.select do
+      raise ArgumentError, ":select is not supported in delete_all by SQLite3"
+    end
+
+    sources = create_names(query, [])
+    cte = cte(query, sources)
+    from = from(query, sources)
+    where = where(query, sources)
+
+    # Joins are rejected above, so this is always a single-table
+    # "DELETE FROM <table> AS <alias> WHERE ..."; SQLite has no MySQL-style
+    # "DELETE <alias>.* FROM ..." form.
+    [cte, "DELETE", from, where]
+  end
+
+  @impl true
+  def insert(prefix, table, header, rows, on_conflict, returning) do
+    insert(prefix, table, header, rows, on_conflict, returning, [])
+  end
+
+  def insert(prefix, table, header, rows, on_conflict, [], []) do
+    fields = quote_names(header)
+
+    [
+      "INSERT INTO ",
+      quote_table(prefix, table),
+      " (",
+      fields,
+      ") VALUES ",
+      insert_all(rows) | on_conflict(on_conflict, header)
+    ]
+  end
+
+  def insert(_prefix, _table, _header, _rows, _on_conflict, _returning, []) do
+    raise ArgumentError, ":returning is not supported in insert/insert_all by SQLite3"
+  end
+
+  def insert(_prefix, _table, _header, _rows, _on_conflict, _returning, _placeholders) do
+    raise ArgumentError, ":placeholders is not supported by SQLite3"
+  end
+
+  @impl true
+  def update(prefix, table, fields, filters, _returning) do
+    fields = intersperse_map(fields, ", ", &[quote_name(&1), " = ?"])
+
+    filters =
+      intersperse_map(filters, " AND ", fn
+        {field, nil} ->
+          [quote_name(field), " IS NULL"]
+
+        {field, _value} ->
+          [quote_name(field), " = ?"]
+      end)
+
+    ["UPDATE ", quote_table(prefix, table), " SET ", fields, " WHERE " | filters]
+  end
+
+  @impl true
+  def delete(prefix, table, filters, _returning) do
+    filters =
+      intersperse_map(filters, " AND ", fn
+        {field, nil} ->
+          [quote_name(field), " IS NULL"]
+
+        {field, _value} ->
+          [quote_name(field), " = ?"]
+      end)
+
+    ["DELETE FROM ", quote_table(prefix, table), " WHERE " | filters]
+  end
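A note on the builders above: `quote_entity/1` (defined later in this file) emits bare identifiers, and all values are bound through positional `?` placeholders, so the generated statements look like this sketch (table and fields invented):

```elixir
iex> alias Ecto.Adapters.Exqlite.Connection
iex> Connection.update(nil, "users", [:name], [id: 1], []) |> IO.iodata_to_binary()
"UPDATE users SET name = ? WHERE id = ?"
iex> Connection.delete(nil, "users", [email: nil], []) |> IO.iodata_to_binary()
"DELETE FROM users WHERE email IS NULL"
```
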
+  @impl true
+  # EXPLAIN in SQLite accepts no modifiers, so opts are not used to build
+  # the explain query. See https://www.sqlite.org/lang_explain.html
+  def explain_query(conn, query, params, opts) do
+    case query(conn, build_explain_query(query), params, opts) do
+      {:ok, %Exqlite.Result{} = result} ->
+        {:ok, Ecto.Adapters.SQL.format_table(result)}
+
+      error ->
+        error
+    end
+  end
+
+  @impl true
+  def execute_ddl({_command, %Table{options: keyword}, _}) when is_list(keyword) do
+    raise ArgumentError, "SQLite adapter does not support keyword lists in :options"
+  end
+
+  @impl true
+  def execute_ddl({command, %Table{} = table, columns})
+      when command in [:create, :create_if_not_exists] do
+    table_structure =
+      case column_definitions(table, columns) ++ pk_definitions(columns, ", ") do
+        [] -> []
+        list -> [?\s, ?(, list, ?)]
+      end
+
+    [
+      [
+        "CREATE TABLE ",
+        if_do(command == :create_if_not_exists, "IF NOT EXISTS "),
+        quote_table(table.prefix, table.name),
+        table_structure,
+        options_expr(table.options)
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({command, %Table{} = table}) when command in @drops do
+    [
+      [
+        "DROP TABLE ",
+        if_do(command == :drop_if_exists, "IF EXISTS "),
+        quote_table(table.prefix, table.name)
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:alter, %Table{} = table, changes}) do
+    [
+      [
+        "ALTER TABLE ",
+        quote_table(table.prefix, table.name),
+        ?\s,
+        column_changes(table, changes),
+        pk_definitions(changes, ", ADD ")
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:create, %Index{} = index}) do
+    fields = intersperse_map(index.columns, ", ", &index_expr/1)
+
+    [
+      [
+        "CREATE ",
+        if_do(index.unique, "UNIQUE "),
+        "INDEX",
+        ?\s,
+        quote_name(index.name),
+        " ON ",
+        quote_table(index.prefix, index.table),
+        ?\s,
+        ?(,
+        fields,
+        ?),
+        if_do(index.where, [" WHERE ", to_string(index.where)])
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:create_if_not_exists, %Index{} = index}) do
+    fields = intersperse_map(index.columns, ", ", &index_expr/1)
+
+    [
+      [
+        "CREATE ",
+        if_do(index.unique, "UNIQUE "),
+        "INDEX IF NOT EXISTS",
+        ?\s,
+        quote_name(index.name),
+        " ON ",
+        quote_table(index.prefix, index.table),
+        ?\s,
+        ?(,
+        fields,
+        ?),
+        if_do(index.where, [" WHERE ", to_string(index.where)])
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:create, %Constraint{check: check}}) when is_binary(check) do
+    error!(nil, "SQLite3 adapter does not support check constraints")
+  end
+
+  @impl true
+  def execute_ddl({:create, %Constraint{exclude: exclude}}) when is_binary(exclude) do
+    error!(nil, "SQLite3 adapter does not support exclusion constraints")
+  end
+
+  @impl true
+  def execute_ddl({:drop, %Index{} = index}) do
+    [
+      [
+        "DROP INDEX ",
+        quote_table(index.prefix, index.name)
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:drop_if_exists, %Index{} = index}) do
+    [
+      [
+        "DROP INDEX IF EXISTS ",
+        quote_table(index.prefix, index.name)
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:drop, %Constraint{}}) do
+    error!(nil, "SQLite3 adapter does not support constraints")
+  end
+
+  @impl true
+  def execute_ddl({:drop_if_exists, %Constraint{}}) do
+    error!(nil, "SQLite3 adapter does not support constraints")
+  end
+
+  @impl true
+  def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do
+    [
+      [
+        "ALTER TABLE ",
+        quote_table(current_table.prefix, current_table.name),
+        " RENAME TO ",
+        quote_table(new_table.prefix, new_table.name)
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl({:rename, %Table{} = table, current_column, new_column}) do
+    [
+      [
+        "ALTER TABLE ",
+        quote_table(table.prefix, table.name),
+        " RENAME COLUMN ",
+        quote_name(current_column),
+        " TO ",
+        quote_name(new_column)
+      ]
+    ]
+  end
+
+  @impl true
+  def execute_ddl(string) when is_binary(string), do: [string]
+
+  @impl true
+  def execute_ddl(keyword) when is_list(keyword) do
+    error!(nil, "SQLite3 adapter does not support keyword lists in execute")
+  end
+
+  @impl true
+  def ddl_logs(_), do: []
+
+  @impl true
+  def table_exists_query(table) do
+    {"SELECT name FROM sqlite_master WHERE type='table' AND name=? LIMIT 1", [table]}
+  end
+
+  defp map_params(params) do
+    Enum.map(params, fn
+      %{__struct__: _} = data_type ->
+        {:ok, value} = Ecto.DataType.dump(data_type)
+        value
+
+      %{} = value ->
+        Ecto.Adapter.json_library().encode!(value)
+
+      value when is_list(value) ->
+        Ecto.Adapter.json_library().encode!(value)
+
+      value ->
+        value
+    end)
+  end
+
+  def build_explain_query(query) do
+    IO.iodata_to_binary(["EXPLAIN ", query])
+  end
+
+  ##
+  ## Query generation
+  ##
+
+  defp strip_quotes(quoted) do
+    size = byte_size(quoted) - 2
+    <<_, unquoted::binary-size(size), _>> = quoted
+    unquoted
+  end
+
+  defp normalize_index_name(quoted, source) do
+    name = strip_quotes(quoted)
+
+    if source do
+      String.trim_leading(name, "#{source}.")
+    else
+      name
+    end
+  end
+
+  defp on_conflict({:raise, _, []}, _header),
+    do: []
+
+  defp on_conflict({:nothing, _, targets}, _header),
+    do: [" ON CONFLICT ", conflict_target(targets) | "DO NOTHING"]
+
+  defp on_conflict({:replace_all, _, []}, _header),
+    do: raise(ArgumentError, "Upsert in SQLite requires :conflict_target")
+
+  defp on_conflict({:replace_all, _, {:constraint, _}}, _header),
+    do: raise(ArgumentError, "Upsert in SQLite does not support ON CONSTRAINT")
+
+  defp on_conflict({:replace_all, _, targets}, header),
+    do: [" ON CONFLICT ", conflict_target(targets), "DO " | replace_all(header)]
+
+  defp on_conflict({query, _, targets}, _header),
+    do: [
+      " ON CONFLICT ",
+      conflict_target(targets),
+      "DO " | update_all(query, "UPDATE SET ")
+    ]
+
+  defp conflict_target([]), do: ""
+
+  defp conflict_target(targets),
+    do: [?(, intersperse_map(targets, ?,, &quote_name/1), ?), ?\s]
+
+  defp replace_all(header) do
+    [
+      "UPDATE SET "
+      | intersperse_map(header, ?,, fn field ->
+          quoted = quote_name(field)
+          [quoted, " = ", "EXCLUDED." | quoted]
+        end)
+    ]
+  end
+
+  defp insert_all(rows) do
+    intersperse_map(rows, ?,, fn row ->
+      [?(, intersperse_map(row, ?,, &insert_all_value/1), ?)]
+    end)
+  end
+
+  defp insert_all_value(nil), do: "DEFAULT"
+
+  defp insert_all_value({%Ecto.Query{} = query, _params_counter}),
+    do: [?(, all(query), ?)]
+
+  defp insert_all_value(_), do: '?'
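The `on_conflict/2` clauses above target SQLite's `ON CONFLICT ... DO` upsert syntax, with `EXCLUDED.` referring to the rejected row (unlike MySQL's `ON DUPLICATE KEY`, a `:conflict_target` is mandatory here). A sketch of what the iodata composes to for a `:replace_all` upsert — names and values invented:

```elixir
iex> alias Ecto.Adapters.Exqlite.Connection
iex> Connection.insert(nil, "users", [:id, :name], [[1, "anna"]], {:replace_all, [], [:id]}, [], [])
...> |> IO.iodata_to_binary()
"INSERT INTO users (id,name) VALUES (?,?) ON CONFLICT (id) DO UPDATE SET id = EXCLUDED.id,name = EXCLUDED.name"
```
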
+ + binary_ops = [ + ==: " = ", + !=: " != ", + <=: " <= ", + >=: " >= ", + <: " < ", + >: " > ", + +: " + ", + -: " - ", + *: " * ", + /: " / ", + and: " AND ", + or: " OR ", + like: " LIKE " + ] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do + ["SELECT ", distinct(distinct, sources, query) | select(fields, sources, query)] + end + + defp distinct(nil, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%QueryExpr{expr: false}, _sources, _query), do: [] + + defp distinct(%QueryExpr{expr: exprs}, _sources, query) when is_list(exprs) do + error!(query, "DISTINCT with multiple columns is not supported by SQLite3") + end + + defp select([], _sources, _query), do: "TRUE" + + defp select(fields, sources, query) do + intersperse_map(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {source, _, nil} -> + error!( + query, + "SQLite3 does not support selecting all fields from #{source} without a schema. " <> + "Please specify a schema or specify exactly which fields you want to select" + ) + + {_, source, _} -> + source + end + + {key, value} -> + [expr(value, sources, query), " AS ", quote_name(key)] + + value -> + expr(value, sources, query) + end) + end + + defp from(%{from: %{source: source, hints: hints}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + [" FROM ", from, " AS ", name | Enum.map(hints, &[?\s | &1])] + end + + defp cte( + %{with_ctes: %WithExpr{recursive: recursive, queries: [_ | _] = queries}} = + query, + sources + ) do + recursive_opt = if recursive, do: "RECURSIVE ", else: "" + ctes = intersperse_map(queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", recursive_opt, ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({name, cte}, sources, query) do + [quote_name(name), " AS ", cte_query(cte, sources, query)] + end + + defp cte_query(%Ecto.Query{} = query, _, _), do: ["(", all(query), ")"] + defp cte_query(%QueryExpr{expr: expr}, sources, query), do: expr(expr, sources, query) + + defp update_fields(type, %{updates: updates} = query, sources) do + fields = + for( + %{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, update_key(type, key, query, sources), value, sources, query) + ) + + Enum.intersperse(fields, ", ") + end + + defp update_key(:update, key, %{from: from} = query, sources) do + {_from, name} = get_source(query, sources, 0, from) + + [name, ?. 
| quote_name(key)] + end + + defp update_key(:on_conflict, key, _query, _sources) do + quote_name(key) + end + + defp update_op(:set, quoted_key, value, sources, query) do + [quoted_key, " = " | expr(value, sources, query)] + end + + defp update_op(:inc, quoted_key, value, sources, query) do + [quoted_key, " = ", quoted_key, " + " | expr(value, sources, query)] + end + + defp update_op(command, _quoted_key, _value, _sources, query) do + error!(query, "Unknown update operation #{inspect(command)} for SQLite3") + end + + defp using_join(%{joins: []}, _kind, _sources), do: {[], []} + + defp using_join(%{joins: joins} = query, kind, sources) do + froms = + intersperse_map(joins, ", ", fn + %JoinExpr{qual: :inner, ix: ix, source: source} -> + {join, name} = get_source(query, sources, ix, source) + [join, " AS " | name] + + %JoinExpr{qual: qual} -> + error!( + query, + "SQLite3 adapter supports only inner joins on #{kind}, got: `#{qual}`" + ) + end) + + wheres = + for %JoinExpr{on: %QueryExpr{expr: value} = expr} <- joins, + value != true, + do: expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) + + {[?,, ?\s | froms], wheres} + end + + defp join(%{joins: []}, _sources), do: [] + + defp join(%{joins: joins} = query, sources) do + Enum.map(joins, fn + %JoinExpr{ + on: %QueryExpr{expr: expr}, + qual: qual, + ix: ix, + source: source, + hints: hints + } -> + {join, name} = get_source(query, sources, ix, source) + + [ + join_qual(qual, query), + join, + " AS ", + name, + Enum.map(hints, &[?\s | &1]) | join_on(qual, expr, sources, query) + ] + end) + end + + defp join_on(:cross, true, _sources, _query), do: [] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner, _), do: " INNER JOIN " + defp join_qual(:left, _), do: " LEFT OUTER JOIN " + defp join_qual(:right, _), do: " RIGHT OUTER JOIN " + defp join_qual(:full, _), do: " FULL OUTER JOIN " + defp join_qual(:cross, _), do: " CROSS JOIN " + + defp join_qual(mode, q), + do: error!(q, "join `#{inspect(mode)}` not supported by SQLite3") + + defp where(%{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp group_by(%{group_bys: []}, _sources), do: [] + + defp group_by(%{group_bys: group_bys} = query, sources) do + [ + " GROUP BY " + | intersperse_map(group_bys, ", ", fn %QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &expr(&1, sources, query)) + end) + ] + end + + defp window(%{windows: []}, _sources), do: [] + + defp window(%{windows: windows} = query, sources) do + [ + " WINDOW " + | intersperse_map(windows, ", ", fn {name, %{expr: kw}} -> + [quote_name(name), " AS " | window_exprs(kw, sources, query)] + end) + ] + end + + defp window_exprs(kw, sources, query) do + [?(, intersperse_map(kw, ?\s, &window_expr(&1, sources, query)), ?)] + end + + defp window_expr({:partition_by, fields}, sources, query) do + ["PARTITION BY " | intersperse_map(fields, ", ", &expr(&1, sources, query))] + end + + defp window_expr({:order_by, fields}, sources, query) do + ["ORDER BY " | intersperse_map(fields, ", ", &order_by_expr(&1, sources, query))] + end + + defp window_expr({:frame, {:fragment, _, _} = fragment}, sources, query) do + expr(fragment, sources, query) + end + + defp order_by(%{order_bys: []}, _sources), do: [] + + defp order_by(%{order_bys: order_bys} = query, sources) do + [ + " ORDER BY " + | intersperse_map(order_bys, ", ", fn 
%QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &order_by_expr(&1, sources, query)) + end) + ] + end + + defp order_by_expr({dir, expr}, sources, query) do + str = expr(expr, sources, query) + + case dir do + :asc -> str + :desc -> [str | " DESC"] + _ -> error!(query, "#{dir} is not supported in ORDER BY in SQLite3") + end + end + + defp limit(%{limit: nil}, _sources), do: [] + + defp limit(%{limit: %QueryExpr{expr: expr}} = query, sources) do + [" LIMIT " | expr(expr, sources, query)] + end + + defp offset(%{offset: nil}, _sources), do: [] + + defp offset(%{offset: %QueryExpr{expr: expr}} = query, sources) do + [" OFFSET " | expr(expr, sources, query)] + end + + defp combinations(%{combinations: combinations}) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query), ")"] + {:union_all, query} -> [" UNION ALL (", all(query), ")"] + {:except, query} -> [" EXCEPT (", all(query), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query), ")"] + {:intersect, query} -> [" INTERSECT (", all(query), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query), ")"] + end) + end + + defp lock(query, _sources) do + error!(query, "SQLite3 does not support locks") + end + + defp boolean(_name, [], _sources, _query), do: [] + + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [ + name, + Enum.reduce(query_exprs, {op, paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op) | paren_expr(expr, sources, query)]} + + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, + [?(, acc, ?), operator_to_boolean(op) | paren_expr(expr, sources, query)]} + end) + |> elem(1) + ] + end + + defp operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + def expr({:^, [], [_ix]}, _sources, _query) do + '?' + end + + def expr( + {{:., _, [{:parent_as, _, [{:&, _, [idx]}]}, field]}, _, []}, + _sources, + query + ) + when is_atom(field) do + {_, name, _} = elem(query.aliases[@parent_as], idx) + [name, ?. | quote_name(field)] + end + + def expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) + when is_atom(field) do + {_, name, _} = elem(sources, idx) + [name, ?. 
| quote_name(field)] + end + + def expr({:&, _, [idx]}, sources, _query) do + {_, source, _} = elem(sources, idx) + source + end + + def expr({:in, _, [_left, []]}, _sources, _query) do + "false" + end + + def expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = intersperse_map(right, ?,, &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args, ?)] + end + + def expr({:in, _, [_, {:^, _, [_, 0]}]}, _sources, _query) do + "false" + end + + def expr({:in, _, [left, {:^, _, [_, length]}]}, sources, query) do + args = Enum.intersperse(List.duplicate(??, length), ?,) + [expr(left, sources, query), " IN (", args, ?)] + end + + def expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + def expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query), ?)] + end + + def expr({:is_nil, _, [arg]}, sources, query) do + [expr(arg, sources, query) | " IS NULL"] + end + + def expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query), ?)] + end + + def expr({:filter, _, _}, _sources, query) do + error!(query, "SQLite3 adapter does not support aggregate filters") + end + + def expr(%Ecto.SubQuery{query: query}, sources, _query) do + query = put_in(query.aliases[@parent_as], sources) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + def expr({:fragment, _, [kw]}, _sources, query) + when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "SQLite3 adapter does not support keyword or interpolated fragments") + end + + def expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + def expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + [ + "CAST (", + "strftime('%Y-%m-%d %H:%M:%f000'", + ",", + expr(datetime, sources, query), + ",", + interval(count, interval, sources), + ") AS TEXT_DATETIME)" + ] + end + + def expr({:date_add, _, [date, count, interval]}, sources, query) do + [ + "CAST (", + "strftime('%Y-%m-%d'", + ",", + expr(date, sources, query), + ",", + interval(count, interval, sources), + ") AS TEXT_DATE)" + ] + end + + def expr({:ilike, _, [_, _]}, _sources, query) do + raise Ecto.QueryError, query: query, message: "ilike is not supported by SQLite3" + end + + def expr({:over, _, [agg, name]}, sources, query) when is_atom(name) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | quote_name(name)] + end + + def expr({:over, _, [agg, kw]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | window_exprs(kw, sources, query)] + end + + def expr({:{}, _, elems}, sources, query) do + [?(, intersperse_map(elems, ?,, &expr(&1, sources, query)), ?)] + end + + def expr({:count, _, []}, _sources, _query), do: "count(*)" + + # Sqlite has some json operation support + # + def expr({:json_extract_path, _, [_expr, _path]}, _sources, query) do + raise Ecto.QueryError, + query: query, + message: "json_extract_path is not currently supported" + end + + # def expr({:json_extract_path, _, [expr, path]}, sources, query) do + # path = + # Enum.map(path, fn + # binary when is_binary(binary) -> + # [?., ?", escape_json_key(binary), ?"] + + # integer when is_integer(integer) -> + # "[#{integer}]" + # end) + + # ["json_extract(", expr(expr, sources, query), ", '$", path, "')"] + # end + + def expr({fun, _, 
args}, sources, query) when is_atom(fun) and is_list(args) do
+    {modifier, args} =
+      case args do
+        [rest, :distinct] -> {"DISTINCT ", [rest]}
+        _ -> {[], args}
+      end
+
+    case handle_call(fun, length(args)) do
+      {:binary_op, op} ->
+        [left, right] = args
+        [op_to_binary(left, sources, query), op | op_to_binary(right, sources, query)]
+
+      {:fun, fun} ->
+        [fun, ?(, modifier, intersperse_map(args, ", ", &expr(&1, sources, query)), ?)]
+    end
+  end
+
+  def expr(list, _sources, query) when is_list(list) do
+    error!(query, "Array type is not supported by SQLite3")
+  end
+
+  def expr(%Decimal{} = decimal, _sources, _query) do
+    Decimal.to_string(decimal, :normal)
+  end
+
+  def expr(%Ecto.Query.Tagged{value: binary, type: :binary}, _sources, _query)
+      when is_binary(binary) do
+    hex = Base.encode16(binary, case: :lower)
+    [?x, ?', hex, ?']
+  end
+
+  def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query)
+      when type in [:decimal, :float] do
+    [expr(other, sources, query), " + 0"]
+  end
+
+  def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do
+    ["CAST(", expr(other, sources, query), " AS ", ecto_cast_to_db(type, query), ?)]
+  end
+
+  def expr(nil, _sources, _query), do: "NULL"
+  def expr(true, _sources, _query), do: "TRUE"
+  def expr(false, _sources, _query), do: "FALSE"
+
+  def expr(literal, _sources, _query) when is_binary(literal) do
+    [?', escape_string(literal), ?']
+  end
+
+  def expr(literal, _sources, _query) when is_integer(literal) do
+    Integer.to_string(literal)
+  end
+
+  def expr(literal, _sources, _query) when is_float(literal) do
+    # Unsure if SQLite3 supports float casting
+    ["(0 + ", Float.to_string(literal), ?)]
+  end
+
+  defp interval(_, "microsecond", _sources) do
+    raise ArgumentError,
+          "SQLite does not support microsecond precision in datetime intervals"
+  end
+
+  defp interval(count, "millisecond", sources) do
+    "(#{expr(count, sources, nil)} / 1000.0) || ' seconds'"
+  end
+
+  defp interval(count, "week", sources) do
+    "(#{expr(count, sources, nil)} * 7) || ' days'"
+  end
+
+  defp interval(count, interval, sources) do
+    "#{expr(count, sources, nil)} || ' #{interval}'"
+  end
+
+  defp op_to_binary({op, _, [_, _]} = expr, sources, query) when op in @binary_ops,
+    do: paren_expr(expr, sources, query)
+
+  defp op_to_binary({:is_nil, _, [_]} = expr, sources, query),
+    do: paren_expr(expr, sources, query)
+
+  defp op_to_binary(expr, sources, query),
+    do: expr(expr, sources, query)
+
+  def create_names(query), do: create_names(query, [])
+
+  def create_names(%{sources: sources}, as_prefix) do
+    create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple()
+  end
+
+  def create_names(sources, pos, limit, as_prefix) when pos < limit do
+    [
+      create_name(sources, pos, as_prefix)
+      | create_names(sources, pos + 1, limit, as_prefix)
+    ]
+  end
+
+  def create_names(_sources, pos, pos, as_prefix) do
+    [as_prefix]
+  end
+
+  defp subquery_as_prefix(sources) do
+    [?s | :erlang.element(tuple_size(sources), sources)]
+  end
+
+  def create_name(sources, pos, as_prefix) do
+    case elem(sources, pos) do
+      {:fragment, _, _} ->
+        {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil}
+
+      {table, schema, prefix} ->
+        name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)]
+        {quote_table(prefix, table), name, schema}
+
+      %Ecto.SubQuery{} ->
+        {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil}
+    end
+  end
+
+  def create_alias(<<first, _rest::binary>>)
+      when first in ?a..?z
+      when first in ?A..?Z do
+    first
+  end
+
+  def create_alias(_) do
+    ?t
+  end
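Before the DDL helpers below, it is worth spelling out how the date arithmetic fits together: `interval/3` turns counts into strings fed to `strftime`, and `expr/3` casts the whole expression to the invented `TEXT_DATETIME`/`TEXT_DATE` types so that `Exqlite.Query` (later in this diff) can recognize and re-parse the result. Shapes below are read off the clauses above, with invented inputs:

```elixir
# interval/3 output shapes:
#   interval(7, "day", sources)           #=> "7 || ' day'"
#   interval(1, "week", sources)          #=> "(1 * 7) || ' days'"
#   interval(500, "millisecond", sources) #=> "(500 / 1000.0) || ' seconds'"
#
# so datetime_add(s.starts_at, 1, "week") should render roughly as:
#   CAST (strftime('%Y-%m-%d %H:%M:%f000',s0.starts_at,(1 * 7) || ' days') AS TEXT_DATETIME)
```
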
+
+  defp pk_definitions(columns, prefix) do
+    pks =
+      for {_, name, _, opts} <- columns,
+          opts[:primary_key],
+          do: name
+
+    case pks do
+      [] -> []
+      _ -> [[prefix, "PRIMARY KEY (", quote_names(pks), ?)]]
+    end
+  end
+
+  defp column_definitions(table, columns) do
+    intersperse_map(columns, ", ", &column_definition(table, &1))
+  end
+
+  defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do
+    [
+      quote_name(name),
+      ?\s,
+      reference_column_type(ref.type, opts),
+      column_options(opts),
+      reference_expr(ref, table, name)
+    ]
+  end
+
+  defp column_definition(_table, {:add, name, type, opts}) do
+    [quote_name(name), ?\s, column_type(type, opts), column_options(opts)]
+  end
+
+  defp column_changes(table, columns) do
+    intersperse_map(columns, ", ", &column_change(table, &1))
+  end
+
+  defp column_change(_table, {_command, _name, %Reference{validate: false}, _opts}) do
+    error!(nil, "validate: false on references is not supported in Exqlite")
+  end
+
+  defp column_change(table, {:add, name, %Reference{} = ref, opts}) do
+    [
+      "ADD ",
+      quote_name(name),
+      ?\s,
+      reference_column_type(ref.type, opts),
+      column_options(opts),
+      constraint_expr(ref, table, name)
+    ]
+  end
+
+  defp column_change(_table, {:add, name, type, opts}) do
+    ["ADD ", quote_name(name), ?\s, column_type(type, opts), column_options(opts)]
+  end
+
+  defp column_change(table, {:add_if_not_exists, name, %Reference{} = ref, opts}) do
+    [
+      "ADD IF NOT EXISTS ",
+      quote_name(name),
+      ?\s,
+      reference_column_type(ref.type, opts),
+      column_options(opts),
+      constraint_if_not_exists_expr(ref, table, name)
+    ]
+  end
+
+  defp column_change(_table, {:add_if_not_exists, name, type, opts}) do
+    [
+      "ADD IF NOT EXISTS ",
+      quote_name(name),
+      ?\s,
+      column_type(type, opts),
+      column_options(opts)
+    ]
+  end
+
+  defp column_change(_table, {:modify, _name, _type, _opts}) do
+    raise ArgumentError, "ALTER COLUMN not supported by SQLite"
+  end
+
+  # `remove/1` and `remove/3` both map to DROP COLUMN, which SQLite
+  # does not implement.
+  defp column_change(_table, {:remove, _name}) do
+    raise ArgumentError, "DROP COLUMN not supported by SQLite"
+  end
+
+  defp column_change(_table, {:remove, _name, _type, _opts}) do
+    raise ArgumentError, "DROP COLUMN not supported by SQLite"
+  end
+
+  defp column_change(table, {:remove_if_exists, name, %Reference{} = ref}) do
+    [
+      drop_constraint_if_exists_expr(ref, table, name),
+      "DROP IF EXISTS ",
+      quote_name(name)
+    ]
+  end
+
+  defp column_change(_table, {:remove_if_exists, name, _type}),
+    do: ["DROP IF EXISTS ", quote_name(name)]
+
+  defp column_options(opts) do
+    default = Keyword.fetch(opts, :default)
+    null = Keyword.get(opts, :null)
+    after_column = Keyword.get(opts, :after)
+
+    [default_expr(default), null_expr(null), after_expr(after_column)]
+  end
+
+  defp after_expr(nil), do: []
+
+  defp after_expr(column) when is_atom(column) or is_binary(column),
+    do: " AFTER `#{column}`"
+
+  defp after_expr(_), do: []
+
+  defp null_expr(false), do: " NOT NULL"
+  defp null_expr(true), do: " NULL"
+  defp null_expr(_), do: []
+
+  defp default_expr({:ok, nil}),
+    do: " DEFAULT NULL"
+
+  defp default_expr({:ok, literal}) when is_binary(literal),
+    do: [" DEFAULT '", escape_string(literal), ?']
+
+  defp default_expr({:ok, literal}) when is_number(literal) or is_boolean(literal),
+    do: [" DEFAULT ", to_string(literal)]
+
+  defp default_expr({:ok, {:fragment, expr}}),
+    do: [" DEFAULT ", expr]
+
+  defp default_expr({:ok, value}) when is_map(value) do
+    library = Application.get_env(:exqlite, :json_library, Jason)
+    expr = IO.iodata_to_binary(library.encode_to_iodata!(value))
+    [" DEFAULT ", ?(, ?', escape_string(expr), ?', ?)]
+  end
+
+  defp default_expr(:error),
+    do: []
+
+  defp index_expr(literal) when is_binary(literal),
+    do: literal
+
+  defp index_expr(literal), do: quote_name(literal)
+
+  defp options_expr(nil),
+    do: []
+
+  defp options_expr(keyword) when is_list(keyword),
+    do: error!(nil, "SQLite3 adapter does not support keyword lists in :options")
+
+  defp options_expr(options),
+    do: [?\s, to_string(options)]
+
+  # Simple column types. Note that we ignore options like :size, :precision,
+  # etc. because columns do not have types, and SQLite will not coerce any
+  # stored value. Thus, "strings" are all text and "numerics" have arbitrary
+  # precision regardless of the declared column type. Decimals are the
+  # only exception.
+  defp column_type(:serial, _opts), do: "INTEGER"
+  defp column_type(:bigserial, _opts), do: "INTEGER"
+  defp column_type(:string, _opts), do: "TEXT"
+  defp column_type(:map, _opts), do: "TEXT"
+  defp column_type({:map, _}, _opts), do: "JSON"
+  defp column_type({:array, _}, _opts), do: "JSON"
+
+  defp column_type(:decimal, opts) do
+    # We only store precision and scale for DECIMAL.
+    precision = Keyword.get(opts, :precision)
+    scale = Keyword.get(opts, :scale, 0)
+
+    decimal_column_type(precision, scale)
+  end
+
+  defp column_type(type, _opts), do: type |> Atom.to_string() |> String.upcase()
+
+  defp decimal_column_type(precision, scale) when is_integer(precision),
+    do: "DECIMAL(#{precision},#{scale})"
+
+  defp decimal_column_type(_precision, _scale), do: "DECIMAL"
+
+  defp reference_expr(type, ref, table, name) do
+    {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with])
+
+    if ref.match do
+      error!(nil, ":match is not supported in references by SQLite3")
+    end
+
+    [
+      "CONSTRAINT ",
+      reference_name(ref, table, name),
+      " ",
+      type,
+      " (",
+      quote_names(current_columns),
+      ?),
+      " REFERENCES ",
+      quote_table(ref.prefix || table.prefix, ref.table),
+      ?(,
+      quote_names(reference_columns),
+      ?),
+      reference_on_delete(ref.on_delete),
+      reference_on_update(ref.on_update)
+    ]
+  end
+
+  defp reference_expr(%Reference{} = ref, table, name),
+    do: [", " | reference_expr("FOREIGN KEY", ref, table, name)]
+
+  defp constraint_expr(%Reference{} = ref, table, name),
+    do: [", ADD " | reference_expr("FOREIGN KEY", ref, table, name)]
+
+  defp constraint_if_not_exists_expr(%Reference{} = ref, table, name),
+    do: [", ADD " | reference_expr("FOREIGN KEY IF NOT EXISTS", ref, table, name)]
+
+  defp drop_constraint_expr(%Reference{} = ref, table, name),
+    do: ["DROP FOREIGN KEY ", reference_name(ref, table, name), ", "]
+
+  defp drop_constraint_expr(_, _, _), do: []
+
+  defp drop_constraint_if_exists_expr(%Reference{} = ref, table, name),
+    do: ["DROP FOREIGN KEY IF EXISTS ", reference_name(ref, table, name), ", "]
+
+  defp drop_constraint_if_exists_expr(_, _, _), do: []
+
+  defp reference_name(%Reference{name: nil}, table, column),
+    do: quote_name("#{table.name}_#{column}_fkey")
+
+  defp reference_name(%Reference{name: name}, _table, _column),
+    do: quote_name(name)
+
+  defp reference_column_type(:serial, _opts), do: "INTEGER"
+  defp reference_column_type(:bigserial, _opts), do: "INTEGER"
+  defp reference_column_type(type, opts), do: column_type(type, opts)
+
+  defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL"
+  defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE"
+  defp reference_on_delete(:restrict), do: " ON DELETE RESTRICT"
ON DELETE RESTRICT" + defp reference_on_delete(_), do: [] + + defp reference_on_update(:nilify_all), do: " ON UPDATE SET NULL" + defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" + defp reference_on_update(:restrict), do: " ON UPDATE RESTRICT" + defp reference_on_update(_), do: [] + + ## + ## Helpers + ## + + defp get_source(query, sources, ix, source) do + {expr, name, _schema} = elem(sources, ix) + {expr || expr(source, sources, query), name} + end + + defp quote_names(names), do: intersperse_map(names, ?,, "e_name/1) + + def quote_name(name), do: quote_entity(name) + + def quote_table(table), do: quote_entity(table) + + defp quote_table(nil, name), do: quote_entity(name) + defp quote_table(prefix, name), do: [quote_entity(prefix), ?., quote_entity(name)] + + defp quote_entity(val) when is_atom(val), do: quote_entity(Atom.to_string(val)) + defp quote_entity(val), do: [val] + + defp intersperse_map(list, separator, mapper, acc \\ []) + + defp intersperse_map([], _separator, _mapper, acc), + do: acc + + defp intersperse_map([elem], _separator, mapper, acc), + do: [acc | mapper.(elem)] + + defp intersperse_map([elem | rest], separator, mapper, acc), + do: intersperse_map(rest, separator, mapper, [acc, mapper.(elem), separator]) + + defp if_do(condition, value) do + if condition, do: value, else: [] + end + + defp escape_string(value) when is_binary(value) do + value + |> :binary.replace("'", "''", [:global]) + |> :binary.replace("\\", "\\\\", [:global]) + end + + defp escape_json_key(value) when is_binary(value) do + value + |> escape_string() + |> :binary.replace("\"", "\\\\\"", [:global]) + end + + defp ecto_cast_to_db(:id, _query), do: "INTEGER" + defp ecto_cast_to_db(:integer, _query), do: "INTEGER" + defp ecto_cast_to_db(:string, _query), do: "TEXT" + defp ecto_cast_to_db(type, query), do: ecto_to_db(type, query) + + defp ecto_to_db(type, query \\ nil) + + defp ecto_to_db({:array, _}, query), + do: error!(query, "Array type is not supported by SQLite3") + + defp ecto_to_db(:id, _query), do: "INTEGER" + defp ecto_to_db(:serial, _query), do: "INTEGER" + defp ecto_to_db(:bigserial, _query), do: "INTEGER" + # TODO: We should make this configurable + defp ecto_to_db(:binary_id, _query), do: "TEXT" + defp ecto_to_db(:string, _query), do: "TEXT" + defp ecto_to_db(:float, _query), do: "NUMERIC" + defp ecto_to_db(:binary, _query), do: "BLOB" + # TODO: We should make this configurable + # SQLite3 does not support uuid + defp ecto_to_db(:uuid, _query), do: "TEXT" + defp ecto_to_db(:map, _query), do: "TEXT" + defp ecto_to_db({:map, _}, _query), do: "TEXT" + defp ecto_to_db(:utc_datetime, _query), do: "TEXT_DATETIME" + defp ecto_to_db(:utc_datetime_usec, _query), do: "TEXT_DATETIME" + defp ecto_to_db(:naive_datetime, _query), do: "TEXT_DATETIME" + defp ecto_to_db(:naive_datetime_usec, _query), do: "TEXT_DATETIME" + defp ecto_to_db(atom, _query) when is_atom(atom), do: Atom.to_string(atom) + defp ecto_to_db(str, _query) when is_binary(str), do: str + + defp ecto_to_db(type, _query) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` where `t` itself follows the same conditions." 
+ end + + defp error!(nil, message) do + raise ArgumentError, message + end + + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end +end diff --git a/lib/exqlite/connection.ex b/lib/exqlite/connection.ex new file mode 100644 index 00000000..cc984f21 --- /dev/null +++ b/lib/exqlite/connection.ex @@ -0,0 +1,310 @@ +defmodule Exqlite.Connection do + use DBConnection + + defstruct db: nil, + db_path: nil, + transaction_status: :idle + + alias Exqlite.Error + alias Exqlite.Result + alias Exqlite.Sqlite3 + + @doc """ + Checks in the state to the connection process. Return `{:ok, state}` + to allow the checkin or `{:disconnect, exception, state}` to disconnect. + + This callback is called when the control of the state is passed back + to the connection process. It should reverse any changes to the + connection state made in `c:checkout/1`. + + This callback is called in the connection process. + """ + @impl true + def checkin(state), do: {:ok, state} + + @doc """ + Checkouts the state from the connection process. Return `{:ok, state}` + to allow the checkout or `{:disconnect, exception, state}` to disconnect. + + This callback is called when the control of the state is passed to + another process. `c:checkin/1` is called with the new state when control + is returned to the connection process. + + This callback is called in the connection process. + """ + @impl true + def checkout(state), do: {:ok, state} + + @doc """ + Called when the connection has been idle for a period of time. Return + `{:ok, state}` to continue or `{:disconnect, exception, state}` to + disconnect. + + This callback is called if no callbacks have been called after the + idle timeout and a client process is not using the state. The idle + timeout can be configured by the `:idle_interval` option. This function + can be called whether the connection is checked in or checked out. + + This callback is called in the connection process. + """ + @impl true + def ping(state), do: {:ok, state} + + @doc """ + Connect to the database. Return `{:ok, state}` on success or + `{:error, exception}` on failure. + + If an error is returned it will be logged and another + connection attempt will be made after a backoff interval. + + This callback is called in the connection process. + """ + @impl true + def connect(options) do + db_path = Keyword.fetch!(options, :database) + + case Sqlite3.open(db_path) do + {:ok, db} -> + { + :ok, + %__MODULE__{ + db: db, + db_path: db_path, + transaction_status: :idle + } + } + + {:error, reason} -> + {:error, %Error{message: reason}} + + _ -> + {:error, %Error{message: "unknown"}} + end + end + + @doc """ + Disconnect from the database. Return `:ok`. + + The exception as first argument is the exception from a `:disconnect` + 3-tuple returned by a previous callback. + + If the state is controlled by a client and it exits or takes too long + to process a request the state will be last known state. In these + cases the exception will be a `DBConnection.ConnectionError`. + + This callback is called in the connection process. + """ + @impl true + def disconnect(_error, %{db: db}) do + Sqlite3.close(db) + :ok + end + + @doc """ + Handle the beginning of a transaction. + + Return `{:ok, result, state}` to continue, `{status, state}` to notify caller + that the transaction can not begin due to the transaction status `status`, + `{:error, exception, state}` (deprecated) to error without beginning the + transaction, or `{:disconnect, exception, state}` to error and disconnect. 
+ + A callback implementation should only return `status` if it + can determine the database's transaction status without side effect. + + This callback is called in the client process. + """ + @impl true + def handle_begin(options, %{transaction_status: status} = state) do + case Keyword.get(options, :mode, :transaction) do + :transaction when status == :idle -> + handle_transaction(:begin, "BEGIN", state) + + :savepoint when status == :transaction -> + handle_transaction(:begin, "SAVEPOINT exqlite_savepoint", state) + + mode when mode in [:transaction, :savepoint] -> + {status, state} + end + end + + @doc """ + Close a query prepared by `c:handle_prepare/3` with the database. Return + `{:ok, result, state}` on success and to continue, + `{:error, exception, state}` to return an error and continue, or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @impl true + def handle_close(_query, _opts, state) do + {:ok, nil, state} + end + + @doc """ + Handle committing a transaction. Return `{:ok, result, state}` on successfully + committing transaction, `{status, state}` to notify caller that the + transaction can not commit due to the transaction status `status`, + `{:error, exception, state}` (deprecated) to error and no longer be inside + transaction, or `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database's transaction status without side effect. + This callback is called in the client process. + """ + @impl true + def handle_commit(options, %{transaction_status: status} = state) do + case Keyword.get(options, :mode, :transaction) do + :transaction when status == :transaction -> + handle_transaction(:commit, "COMMIT", state) + + :savepoint when status == :transaction -> + handle_transaction(:commit, "RELEASE SAVEPOINT exqlite_savepoint", state) + + mode when mode in [:transaction, :savepoint] -> + {status, state} + end + end + + @doc """ + Deallocate a cursor declared by `c:handle_declare/4` with the database. Return + `{:ok, result, state}` on success and to continue, + `{:error, exception, state}` to return an error and continue, or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @impl true + def handle_deallocate(_query, _cursor, _opts, state) do + {:error, %Error{message: "cursors not supported"}, state} + end + + @doc """ + Declare a cursor using a query prepared by `c:handle_prepare/3`. Return + `{:ok, query, cursor, state}` to return altered query `query` and cursor + `cursor` for a stream and continue, `{:error, exception, state}` to return an + error and continue or `{:disconnect, exception, state}` to return an error + and disconnect. + + This callback is called in the client process. + """ + @impl true + def handle_declare(_query, _cursor, _opts, state) do + # TODO: Explore building cursor like support + {:error, %Error{message: "cursors not supported"}, state} + end + + @doc """ + Execute a query prepared by `c:handle_prepare/3`. Return + `{:ok, query, result, state}` to return altered query `query` and result + `result` and continue, `{:error, exception, state}` to return an error and + continue or `{:disconnect, exception, state}` to return an error and + disconnect. + + This callback is called in the client process. 
+ """ + @impl true + def handle_execute(_query, _params, _opts, state) do + {:ok, nil, state} + end + + @doc """ + Fetch the next result from a cursor declared by `c:handle_declare/4`. Return + `{:cont, result, state}` to return the result `result` and continue using + cursor, `{:halt, result, state}` to return the result `result` and close the + cursor, `{:error, exception, state}` to return an error and close the + cursor, `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @impl true + def handle_fetch(_query, _cursor, _opts, state) do + {:error, :cursors_not_supported, state} + end + + @doc """ + Prepare a query with the database. Return `{:ok, query, state}` where + `query` is a query to pass to `execute/4` or `close/3`, + `{:error, exception, state}` to return an error and continue or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is intended for cases where the state of a connection is + needed to prepare a query and/or the query can be saved in the + database to call later. + + This callback is called in the client process. + """ + @impl true + def handle_prepare(_query, _opts, state) do + {:ok, nil, state} + end + + @doc """ + Handle rolling back a transaction. Return `{:ok, result, state}` on successfully + rolling back transaction, `{status, state}` to notify caller that the + transaction can not rollback due to the transaction status `status`, + `{:error, exception, state}` (deprecated) to + error and no longer be inside transaction, or + `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database' transaction status without side effect. + + This callback is called in the client and connection process. + """ + @impl true + def handle_rollback(options, %{transaction_status: transaction_status} = state) do + case Keyword.get(options, :mode, :transaction) do + :transaction when transaction_status == :transaction -> + handle_transaction(:rollback, "ROLLBACK", state) + + :savepoint when transaction_status == :transaction -> + with {:ok, _result, state} <- + handle_transaction( + :rollback, + "ROLLBACK TO SAVEPOINT exqlite_savepoint", + state + ) do + handle_transaction(:rollback, "RELEASE SAVEPOINT exqlite_savepoint", state) + end + + mode when mode in [:transaction, :savepoint] -> + {transaction_status, state} + end + end + + @doc """ + Handle getting the transaction status. Return `{:idle, state}` if outside a + transaction, `{:transaction, state}` if inside a transaction, + `{:error, state}` if inside an aborted transaction, or + `{:disconnect, exception, state}` to error and disconnect. + + If the callback returns a `:disconnect` tuples then `status/2` will return + `:error`. 
+  """
+  @impl true
+  def handle_status(_opts, state) do
+    {:idle, state}
+  end
+
+  defp handle_transaction(command, sql, %{db: db} = state) do
+    case Sqlite3.execute(db, sql) do
+      :ok ->
+        {
+          :ok,
+          %Result{
+            rows: nil,
+            num_rows: nil,
+            columns: nil,
+            command: command
+          },
+          state
+        }
+
+      {:error, reason} ->
+        {:error, %Error{message: reason}, state}
+
+      _ ->
+        {:error, %Error{message: "something went wrong"}, state}
+    end
+  end
+end
diff --git a/lib/exqlite/error.ex b/lib/exqlite/error.ex
index 22a86167..8553f098 100644
--- a/lib/exqlite/error.ex
+++ b/lib/exqlite/error.ex
@@ -1,4 +1,3 @@
 defmodule Exqlite.Error do
-  @moduledoc false
-  defexception [:message]
+  defexception [:message, :sqlite3]
 end
diff --git a/lib/exqlite/query.ex b/lib/exqlite/query.ex
new file mode 100644
index 00000000..f545dce0
--- /dev/null
+++ b/lib/exqlite/query.ex
@@ -0,0 +1,129 @@
+defmodule Exqlite.Query do
+  defstruct [:name, :statement, :prepared]
+
+  defimpl DBConnection.Query do
+    def parse(%{name: name} = query, _) do
+      %{query | name: IO.iodata_to_binary(name)}
+    end
+
+    def describe(query, _), do: query
+
+    def encode(_query, params, _opts), do: params
+
+    def decode(_query, %Exqlite.Result{rows: nil} = res, _opts), do: res
+
+    def decode(
+          %Exqlite.Query{prepared: %{types: types}},
+          %Exqlite.Result{rows: rows, columns: columns} = res,
+          opts
+        ) do
+      mapper = opts[:decode_mapper]
+      decoded_rows = Enum.map(rows, &decode_row(&1, types, columns, mapper))
+      %{res | rows: decoded_rows}
+    end
+
+    ## Helpers
+
+    defp decode_row(row, types, column_names, nil) do
+      row
+      |> Enum.zip(types)
+      |> Enum.map(&translate_value/1)
+      |> Enum.zip(column_names)
+      |> cast_any_datetimes
+    end
+
+    defp decode_row(row, types, column_names, mapper) do
+      mapper.(decode_row(row, types, column_names, nil))
+    end
+
+    defp translate_value({:undefined, _type}), do: nil
+    defp translate_value({{:blob, blob}, _type}), do: blob
+    defp translate_value({{:text, text}, _type}), do: text
+
+    defp translate_value({"", "date"}), do: nil
+    defp translate_value({date, "date"}) when is_binary(date), do: to_date(date)
+
+    defp translate_value({"", "time"}), do: nil
+    defp translate_value({time, "time"}) when is_binary(time), do: to_time(time)
+
+    defp translate_value({"", "datetime"}), do: nil
+
+    defp translate_value({0, "boolean"}), do: false
+    defp translate_value({1, "boolean"}), do: true
+
+    defp translate_value({int, type = <<"decimal", _::binary>>}) when is_integer(int) do
+      {result, _} = int |> Integer.to_string() |> Float.parse()
+      translate_value({result, type})
+    end
+
+    defp translate_value({float, "decimal"}), do: Decimal.from_float(float)
+
+    defp translate_value({float, "decimal(" <> rest}) do
+      [precision, scale] =
+        rest
+        |> String.trim_trailing(")")
+        |> String.split(",")
+        |> Enum.map(&String.to_integer/1)
+
+      Decimal.with_context(
+        %Decimal.Context{precision: precision, rounding: :down},
+        fn ->
+          float |> Float.round(scale) |> Decimal.from_float() |> Decimal.plus()
+        end
+      )
+    end
+
+    defp translate_value({value, _type}), do: value
+
+    defp to_date(date) do
+      <<yr::binary-size(4), "-", mo::binary-size(2), "-", da::binary-size(2)>> = date
+      {String.to_integer(yr), String.to_integer(mo), String.to_integer(da)}
+    end
+
+    defp to_time(
+           <<hr::binary-size(2), ":", mi::binary-size(2), ":", se::binary-size(2),
+             ".", fr::binary>>
+         )
+         when byte_size(fr) <= 6 do
+      fr = String.to_integer(fr <> String.duplicate("0", 6 - String.length(fr)))
+      {String.to_integer(hr), String.to_integer(mi), String.to_integer(se), fr}
+    end
+
+    # We use a special conversion for when the user is trying to cast to a
+    # DATETIME type. We introduce a TEXT_DATETIME pseudo-type to preserve the
+    # datetime string. When we get here, we look for a CAST function as a signal
+    # to convert that back to Elixir date types.
+    defp cast_any_datetimes(row) do
+      Enum.map(row, fn {value, column_name} ->
+        if String.contains?(column_name, "CAST (") &&
+             String.contains?(column_name, "TEXT_DATE") do
+          string_to_datetime(value)
+        else
+          value
+        end
+      end)
+    end
+
+    defp string_to_datetime(
+           <<yr::binary-size(4), "-", mo::binary-size(2), "-", da::binary-size(2)>>
+         ) do
+      {String.to_integer(yr), String.to_integer(mo), String.to_integer(da)}
+    end
+
+    defp string_to_datetime(str) do
+      <<yr::binary-size(4), "-", mo::binary-size(2), "-", da::binary-size(2), " ",
+        hr::binary-size(2), ":", mi::binary-size(2), ":", se::binary-size(2), ".",
+        fr::binary-size(6)>> = str
+
+      {{String.to_integer(yr), String.to_integer(mo), String.to_integer(da)},
+       {String.to_integer(hr), String.to_integer(mi), String.to_integer(se),
+        String.to_integer(fr)}}
+    end
+  end
+
+  defimpl String.Chars do
+    def to_string(%Exqlite.Query{statement: statement}) do
+      IO.iodata_to_binary(statement)
+    end
+  end
+end
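The decoding half of the `TEXT_DATETIME` trick lives in `cast_any_datetimes/1` above: the result column of a cast is literally named `CAST (... AS TEXT_DATETIME)`, so sniffing the column name for `"CAST ("` plus `"TEXT_DATE"` catches both pseudo-types before the string is re-parsed. The binary patterns do the actual parsing — a runnable sketch with an invented value:

```elixir
value = "2021-02-14 09:30:00.000000"

<<yr::binary-size(4), "-", mo::binary-size(2), "-", da::binary-size(2), " ",
  hr::binary-size(2), ":", mi::binary-size(2), ":", se::binary-size(2), ".",
  fr::binary-size(6)>> = value

{{String.to_integer(yr), String.to_integer(mo), String.to_integer(da)},
 {String.to_integer(hr), String.to_integer(mi), String.to_integer(se),
  String.to_integer(fr)}}
#=> {{2021, 2, 14}, {9, 30, 0, 0}}
```
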
diff --git a/lib/exqlite/result.ex b/lib/exqlite/result.ex
index 37ec1dae..9de2a221 100644
--- a/lib/exqlite/result.ex
+++ b/lib/exqlite/result.ex
@@ -1,4 +1,15 @@
 defmodule Exqlite.Result do
+  @moduledoc """
+  Result struct returned from any successful query. Its fields are:
+
+    * `command` - An atom of the query command, for example: `:select` or
+      `:insert`;
+    * `columns` - The column names;
+    * `rows` - The result set. A list of tuples, each tuple corresponding to a
+      row, each element in the tuple corresponds to a column;
+    * `num_rows` - The number of fetched or affected rows;
+  """
+
   @type t :: %__MODULE__{
           command: atom,
           columns: [String.t()] | nil,
diff --git a/lib/exqlite/stream.ex b/lib/exqlite/stream.ex
new file mode 100644
index 00000000..dde8a436
--- /dev/null
+++ b/lib/exqlite/stream.ex
@@ -0,0 +1,36 @@
+defmodule Exqlite.Stream do
+  @moduledoc false
+  defstruct [:conn, :query, :params, :options, max_rows: 500]
+  @type t :: %Exqlite.Stream{}
+
+  defimpl Enumerable do
+    def reduce(
+          %Exqlite.Stream{query: statement, conn: conn, params: params, options: opts},
+          acc,
+          fun
+        )
+        when is_binary(statement) do
+      query = %Exqlite.Query{name: "", statement: statement}
+
+      case DBConnection.prepare_execute(conn, query, params, opts) do
+        {:ok, _, %{rows: _rows} = result} ->
+          Enumerable.reduce([result], acc, fun)
+
+        {:error, err} ->
+          raise err
+      end
+    end
+
+    def member?(_, _) do
+      {:error, __MODULE__}
+    end
+
+    def count(_) do
+      {:error, __MODULE__}
+    end
+
+    def slice(_) do
+      {:error, __MODULE__}
+    end
+  end
+end
diff --git a/mix.exs b/mix.exs
index 03b69e34..f459271a 100644
--- a/mix.exs
+++ b/mix.exs
@@ -4,17 +4,19 @@ defmodule Exqlite.MixProject do
   def project do
     [
       app: :exqlite,
-      version: "0.1.1",
-      elixir: "~> 1.11",
       compilers: [:elixir_make] ++ Mix.compilers(),
-      make_targets: ["all"],
-      make_clean: ["clean"],
-      start_permanent: Mix.env() == :prod,
-      source_url: "https://github.com/warmwaffles/exqlite",
-      homepage_url: "https://github.com/warmwaffles/exqlite",
       deps: deps(),
+      description: description(),
+      elixir: "~> 1.11",
+      elixirc_paths: elixirc_paths(Mix.env()),
+      homepage_url: "https://github.com/warmwaffles/exqlite",
+      make_clean: ["clean"],
+      make_targets: ["all"],
       package: package(),
-      description: description()
+      source_url: "https://github.com/warmwaffles/exqlite",
+      start_permanent: Mix.env() == :prod,
+      test_paths: test_paths(),
+      version: "0.1.1"
     ]
   end
@@ -33,6 +35,7 @@
       {:ecto_sql, "~> 3.5.4"},
      {:elixir_make, "~> 0.6", runtime: false},
      {:ex_doc, "~> 0.23.0", only: [:dev], runtime: false},
+      {:jason, ">= 0.0.0", only: [:test, :docs]},
+      {:temp, "~> 
0.4", only: [:test]} ] end @@ -51,4 +54,9 @@ defmodule Exqlite.MixProject do } ] end + + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + defp test_paths, do: ["test"] end diff --git a/mix.lock b/mix.lock index 062effe9..975624b5 100644 --- a/mix.lock +++ b/mix.lock @@ -8,6 +8,7 @@ "elixir_make": {:hex, :elixir_make, "0.6.2", "7dffacd77dec4c37b39af867cedaabb0b59f6a871f89722c25b28fcd4bd70530", [:mix], [], "hexpm", "03e49eadda22526a7e5279d53321d1cced6552f344ba4e03e619063de75348d9"}, "esqlite": {:hex, :esqlite, "0.4.1", "ba5d0bab6b9c8432ffe1bf12fee8e154a50f1c3c40eadc3a9c870c23ca94d961", [:rebar3], [], "hexpm", "3584ca33172f4815ce56e96eed9835f5d8c987a9000fbc8c376c86acef8bf965"}, "ex_doc": {:hex, :ex_doc, "0.23.0", "a069bc9b0bf8efe323ecde8c0d62afc13d308b1fa3d228b65bca5cf8703a529d", [:mix], [{:earmark_parser, "~> 1.4.0", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}], "hexpm", "f5e2c4702468b2fd11b10d39416ddadd2fcdd173ba2a0285ebd92c39827a5a16"}, + "jason": {:hex, :jason, "1.2.2", "ba43e3f2709fd1aa1dce90aaabfd039d000469c05c56f0b8e31978e03fa39052", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "18a228f5f0058ee183f29f9eae0805c6e59d61c3b006760668d8d18ff0d12179"}, "makeup": {:hex, :makeup, "1.0.5", "d5a830bc42c9800ce07dd97fa94669dfb93d3bf5fcf6ea7a0c67b2e0e4a7f26c", [:mix], [{:nimble_parsec, "~> 0.5 or ~> 1.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cfa158c02d3f5c0c665d0af11512fed3fba0144cf1aadee0f2ce17747fba2ca9"}, "makeup_elixir": {:hex, :makeup_elixir, "0.15.1", "b5888c880d17d1cc3e598f05cdb5b5a91b7b17ac4eaf5f297cb697663a1094dd", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.1", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "db68c173234b07ab2a07f645a5acdc117b9f99d69ebf521821d89690ae6c6ec8"}, "nimble_parsec": {:hex, :nimble_parsec, "1.1.0", "3a6fca1550363552e54c216debb6a9e95bd8d32348938e13de5eda962c0d7f89", [:mix], [], "hexpm", "08eb32d66b706e913ff748f11694b17981c0b04a33ef470e33e11b3d3ac8f54b"}, diff --git a/test/ecto/adapters/exqlite_test.exs b/test/ecto/adapters/exqlite_test.exs new file mode 100644 index 00000000..56ccf284 --- /dev/null +++ b/test/ecto/adapters/exqlite_test.exs @@ -0,0 +1,192 @@ +defmodule Ecto.Adapters.ExqliteTest do + use ExUnit.Case, async: true + + import Ecto.Query + + alias Ecto.Queryable + alias Ecto.Adapters.Exqlite.Connection, as: Connection + alias Ecto.Migration.Reference + + ## + ## Schema Definitions + ## + + defmodule Schema do + use Ecto.Schema + + schema "schema" do + field(:x, :integer) + field(:y, :integer) + field(:z, :integer) + field(:meta, :map) + + has_many(:comments, Ecto.Adapters.ExqliteTest.Schema2, + references: :x, + foreign_key: :z + ) + + has_one(:permalink, Ecto.Adapters.ExqliteTest.Schema3, + references: :y, + foreign_key: :id + ) + end + end + + defmodule Schema2 do + use Ecto.Schema + + schema "schema2" do + belongs_to(:post, Ecto.Adapters.ExqliteTest.Schema, + references: :x, + foreign_key: :z + ) + end + end + + defmodule Schema3 do + use Ecto.Schema + + schema "schema3" do + field(:binary, :binary) + end + end + + defp plan(query, operation \\ :all) do + {query, _params} = + Ecto.Adapter.Queryable.plan_query(operation, Ecto.Adapters.Exqlite, query) + + query + end + + ## + ## Helpers + ## + + defp all(query), do: query |> Connection.all() |> 
IO.iodata_to_binary() + defp update_all(query), do: query |> Connection.update_all() |> IO.iodata_to_binary() + defp delete_all(query), do: query |> Connection.delete_all() |> IO.iodata_to_binary() + + defp execute_ddl(query) do + query + |> Connection.execute_ddl() + |> Enum.map(&IO.iodata_to_binary/1) + end + + defp insert(prefx, table, header, rows, on_conflict, returning) do + Connection.insert(prefx, table, header, rows, on_conflict, returning, []) + |> IO.iodata_to_binary() + end + + defp update(prefx, table, fields, filter, returning) do + Connection.update(prefx, table, fields, filter, returning) + |> IO.iodata_to_binary() + end + + defp delete(prefx, table, filter, returning) do + Connection.delete(prefx, table, filter, returning) + |> IO.iodata_to_binary() + end + + defp remove_newlines(string) do + string + |> String.trim() + |> String.replace("\n", " ") + end + + ## + ## Tests + ## + + describe ".all/1" do + test "thing" do + Schema + |> Ecto.Query.select([r], r.x) + |> plan() + |> Connection.all() + + query = Schema |> select([r], r.x) |> plan() + assert all(query) == ~s{SELECT s0.x FROM schema AS s0} + end + end + + describe ".create_alias/1" do + test "returns first character" do + assert ?p == Connection.create_alias("post") + end + + test "returns ?t when the first value is not a-z A-Z" do + assert ?t == Connection.create_alias("0post") + end + end + + describe ".create_name/3" do + test "for a fragment" do + assert Connection.create_name({{:fragment, nil, nil}}, 0, []) == + {nil, [?f | "0"], nil} + + assert Connection.create_name({{}, {:fragment, nil, nil}}, 1, []) == + {nil, [?f | "1"], nil} + + assert Connection.create_name({{}, {}, {:fragment, nil, nil}}, 2, []) == + {nil, [?f | "2"], nil} + end + + test "for a table" do + assert Connection.create_name({{"table_name", "schema_name", nil}}, 0, []) == + {["table_name"], [?t | "0"], "schema_name"} + + assert Connection.create_name({{}, {"table_name", "schema_name", nil}}, 1, []) == + {["table_name"], [?t | "1"], "schema_name"} + + assert Connection.create_name({{}, {}, {"table_name", "schema_name", nil}}, 2, []) == + {["table_name"], [?t | "2"], "schema_name"} + end + end + + describe ".create_names/1" do + test "creates names with a schema" do + query = select(Schema, [r], r.x) |> plan() + assert Connection.create_names(query) == {{["schema"], [?s | "0"], Schema}, []} + end + + test "creates names without a schema" do + query = select("posts", [r], r.x) |> plan() + assert Connection.create_names(query) == {{["posts"], [?p | "0"], nil}, []} + end + + test "creates names with a fragment" do + query = select("posts", [r], fragment("?", r)) |> plan() + assert Connection.create_names(query) == {{["posts"], [?p | "0"], nil}, []} + end + + test "creates names that have a leading number" do + query = select("0posts", [:x]) |> plan() + assert Connection.create_names(query) == {{["0posts"], [?t | "0"], nil}, []} + end + + test "creates names without a schema and a subquery" do + query = + subquery("posts" |> select([r], %{x: r.x, y: r.y})) + |> select([r], r.x) + |> plan() + + assert Connection.create_names(query) == {{nil, [?s | "0"], nil}, []} + end + + test "creates names with deeper selects" do + query = + subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r) |> plan() + + assert Connection.create_names(query) == {{nil, [?s | "0"], nil}, []} + end + + test "creates names with a subquery of another subquery" do + query = + subquery(subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r)) + |> 
select([r], r) + |> plan() + + assert Connection.create_names(query) == {{nil, [?s | "0"], nil}, []} + end + end +end diff --git a/test/ecto/adapters/other_test.exs b/test/ecto/adapters/other_test.exs new file mode 100644 index 00000000..fc430102 --- /dev/null +++ b/test/ecto/adapters/other_test.exs @@ -0,0 +1,1928 @@ +defmodule Exqlite.BaseTest do + # , async: true + use ExUnit.Case + + # IMPORTANT: This is closely modeled on Ecto's postgres_test.exs file. + # We strive to avoid structural differences between that file and this one. + + alias Ecto.Integration.Post + alias Ecto.Integration.TestRepo + alias Ecto.Migration.Table + alias Ecto.Adapters.Exqlite.Connection, as: SQL + + import Ecto.Query + + describe "storage_up" do + test "fails with :already_up on second call" do + tmp = [database: tempfilename()] + assert Exqlite.storage_up(tmp) == :ok + assert File.exists?(tmp[:database]) + assert Exqlite.storage_up(tmp) == {:error, :already_up} + File.rm(tmp[:database]) + end + + test "fails with helpful error message if no database specified" do + assert_raise ArgumentError, + """ + No SQLite database path specified. Please check the configuration for your Repo. + Your config/*.exs file should have something like this in it: + + config :my_app, MyApp.Repo, + adapter: Exqlite, + database: "/path/to/sqlite/database" + + Options provided were: + + [mumble: "no database here"] + + """, + fn -> Exqlite.storage_up(mumble: "no database here") == :ok end + end + end + + test "storage down (twice)" do + tmp = [database: tempfilename()] + assert Exqlite.storage_up(tmp) == :ok + assert Exqlite.storage_down(tmp) == :ok + refute File.exists?(tmp[:database]) + assert Exqlite.storage_down(tmp) == {:error, :already_down} + end + + test "storage up creates directory" do + dir = "/tmp/my_sqlite_ecto_directory/" + File.rm_rf!(dir) + tmp = [database: dir <> tempfilename()] + :ok = Exqlite.storage_up(tmp) + assert File.exists?(dir <> "tmp/") && File.dir?(dir <> "tmp/") + end + + # return a unique temporary filename + defp tempfilename do + 1..10 + |> Enum.map(fn _ -> :rand.uniform(10) - 1 end) + |> Enum.join() + |> (fn name -> "/tmp/test_" <> name <> ".db" end).() + end + + import Ecto.Query + + alias Ecto.Queryable + + defmodule Schema do + use Ecto.Schema + + schema "schema" do + field(:x, :integer) + field(:y, :integer) + field(:z, :integer) + + has_many(:comments, Exqlite.Test.Schema2, + references: :x, + foreign_key: :z + ) + + has_one(:permalink, Exqlite.Test.Schema3, + references: :y, + foreign_key: :id + ) + end + end + + defmodule SchemaWithArray do + use Ecto.Schema + + schema "schema" do + field(:x, :integer) + field(:y, :integer) + field(:z, :integer) + field(:w, {:array, :integer}) + end + end + + defmodule Schema2 do + use Ecto.Schema + + schema "schema2" do + belongs_to(:post, Exqlite.Test.Schema, + references: :x, + foreign_key: :z + ) + end + end + + defmodule Schema3 do + use Ecto.Schema + + schema "schema3" do + field(:list1, {:array, :string}) + field(:list2, {:array, :integer}) + field(:binary, :binary) + end + end + + defp plan(query, operation \\ :all) do + {query, _params} = + Ecto.Adapter.Queryable.plan_query(operation, Ecto.Adapters.Exqlite, query) + + query + end + + defp normalize(query, operation \\ :all, counter \\ 0) do + {query, _params, _key} = + Ecto.Query.Planner.prepare(query, operation, Exqlite, counter) + + {query, _} = Ecto.Query.Planner.normalize(query, operation, Exqlite, counter) + query + end + + defp all(query), do: query |> SQL.all() |> IO.iodata_to_binary() + + defp 
update_all(query), + do: query |> SQL.update_all() |> IO.iodata_to_binary() + + defp delete_all(query), + do: query |> SQL.delete_all() |> IO.iodata_to_binary() + + defp execute_ddl(query), + do: query |> SQL.execute_ddl() |> Enum.map(&IO.iodata_to_binary/1) + + defp insert(prefx, table, header, rows, on_conflict, returning) do + IO.iodata_to_binary(SQL.insert(prefx, table, header, rows, on_conflict, returning)) + end + + defp update(prefx, table, fields, filter, returning) do + IO.iodata_to_binary(SQL.update(prefx, table, fields, filter, returning)) + end + + defp delete(prefx, table, filter, returning) do + IO.iodata_to_binary(SQL.delete(prefx, table, filter, returning)) + end + + defp remove_newlines(string), do: String.trim(string) |> String.replace("\n", " ") + + test "from" do + query = Schema |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0.x FROM schema AS s0} + end + + test "from without schema" do + query = "posts" |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT p0.x FROM posts AS p0} + + query = "posts" |> select([:x]) |> normalize + assert all(query) == ~s{SELECT p0.x FROM posts AS p0} + + assert_raise Ecto.QueryError, + ~r"SQLite does not support selecting all fields from \"posts\" without a schema", + fn -> + all(from(p in "posts", select: p) |> normalize()) + end + end + + test "from with subquery" do + query = + subquery("posts" |> select([r], %{x: r.x, y: r.y})) + |> select([r], r.x) + |> normalize + + assert all(query) == + ~s{SELECT s0.x FROM (SELECT p0.x AS x, p0.y AS y FROM posts AS p0) AS s0} + + query = + subquery("posts" |> select([r], %{x: r.x, z: r.y})) |> select([r], r) |> normalize + + assert all(query) == + ~s{SELECT s0.x, s0.z FROM (SELECT p0.x AS x, p0.y AS z FROM posts AS p0) AS s0} + end + + test "select" do + query = Schema |> select([r], {r.x, r.y}) |> normalize + assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} + + query = Schema |> select([r], [r.x, r.y]) |> normalize + assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} + + query = Schema |> select([r], struct(r, [:x, :y])) |> normalize + assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} + end + + test "aggregates" do + query = Schema |> select([r], count(r.x)) |> normalize + assert all(query) == ~s{SELECT count(s0.x) FROM schema AS s0} + + query = Schema |> select([r], count(r.x, :distinct)) |> normalize + assert all(query) == ~s{SELECT count(DISTINCT s0.x) FROM schema AS s0} + end + + test "distinct" do + assert_raise ArgumentError, + "DISTINCT with multiple columns is not supported by SQLite", + fn -> + query = + Schema + |> distinct([r], r.x) + |> select([r], {r.x, r.y}) + |> normalize + + all(query) + end + + assert_raise ArgumentError, + "DISTINCT with multiple columns is not supported by SQLite", + fn -> + query = + Schema + |> distinct([r], desc: r.x) + |> select([r], {r.x, r.y}) + |> normalize + + all(query) + end + + assert_raise ArgumentError, + "DISTINCT with multiple columns is not supported by SQLite", + fn -> + query = Schema |> distinct([r], 2) |> select([r], r.x) |> normalize + all(query) + end + + assert_raise ArgumentError, + "DISTINCT with multiple columns is not supported by SQLite", + fn -> + query = + Schema + |> distinct([r], [r.x, r.y]) + |> select([r], {r.x, r.y}) + |> normalize + + all(query) + end + + query = Schema |> distinct([r], true) |> select([r], {r.x, r.y}) |> normalize + assert all(query) == ~s{SELECT DISTINCT s0.x, s0.y FROM schema AS s0} + + query = Schema |> distinct([r], false) |> select([r], 
{r.x, r.y}) |> normalize + assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} + + query = Schema |> distinct(true) |> select([r], {r.x, r.y}) |> normalize + assert all(query) == ~s{SELECT DISTINCT s0.x, s0.y FROM schema AS s0} + + query = Schema |> distinct(false) |> select([r], {r.x, r.y}) |> normalize + assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} + end + + test "distinct with order by" do + assert_raise ArgumentError, + "DISTINCT with multiple columns is not supported by SQLite", + fn -> + query = + Schema + |> order_by([r], [r.y]) + |> distinct([r], desc: r.x) + |> select([r], r.x) + |> normalize + + all(query) + end + end + + test "where" do + query = + Schema + |> where([r], r.x == 42) + |> where([r], r.y != 43) + |> select([r], r.x) + |> normalize + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 WHERE (s0.x = 42) AND (s0.y != 43)} + end + + test "or_where" do + query = + Schema + |> or_where([r], r.x == 42) + |> or_where([r], r.y != 43) + |> select([r], r.x) + |> normalize + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 WHERE (s0.x = 42) OR (s0.y != 43)} + + query = + Schema + |> or_where([r], r.x == 42) + |> or_where([r], r.y != 43) + |> where([r], r.z == 44) + |> select([r], r.x) + |> normalize + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 WHERE ((s0.x = 42) OR (s0.y != 43)) AND (s0.z = 44)} + end + + test "order by" do + query = Schema |> order_by([r], r.x) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0.x FROM schema AS s0 ORDER BY s0.x} + + query = Schema |> order_by([r], [r.x, r.y]) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0.x FROM schema AS s0 ORDER BY s0.x, s0.y} + + query = + Schema |> order_by([r], asc: r.x, desc: r.y) |> select([r], r.x) |> normalize + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 ORDER BY s0.x, s0.y DESC} + + query = Schema |> order_by([r], []) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0.x FROM schema AS s0} + end + + test "limit and offset" do + query = Schema |> limit([r], 3) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM schema AS s0 LIMIT 3} + + query = Schema |> offset([r], 5) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM schema AS s0 OFFSET 5} + + query = Schema |> offset([r], 5) |> limit([r], 3) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM schema AS s0 LIMIT 3 OFFSET 5} + end + + test "lock" do + assert_raise ArgumentError, "locks are not supported by SQLite", fn -> + query = Schema |> lock("FOR SHARE NOWAIT") |> select([], 0) |> normalize + all(query) + end + end + + test "string escape" do + query = "schema" |> where(foo: "'\\ ") |> select([], true) |> normalize + + assert all(query) == + ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = '''\\ ')} + + query = "schema" |> where(foo: "'") |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = '''')} + end + + test "binary ops" do + query = Schema |> select([r], r.x == 2) |> normalize + assert all(query) == ~s{SELECT s0."x" = 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x != 2) |> normalize + assert all(query) == ~s{SELECT s0."x" != 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x <= 2) |> normalize + assert all(query) == ~s{SELECT s0."x" <= 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x >= 2) |> normalize + assert all(query) == ~s{SELECT s0."x" >= 2 FROM "schema" AS s0} + + query = Schema 
|> select([r], r.x < 2) |> normalize + assert all(query) == ~s{SELECT s0."x" < 2 FROM "schema" AS s0} + + query = Schema |> select([r], r.x > 2) |> normalize + assert all(query) == ~s{SELECT s0."x" > 2 FROM "schema" AS s0} + end + + test "is_nil" do + query = Schema |> select([r], is_nil(r.x)) |> normalize + assert all(query) == ~s{SELECT s0."x" IS NULL FROM "schema" AS s0} + + query = Schema |> select([r], not is_nil(r.x)) |> normalize + assert all(query) == ~s{SELECT NOT (s0."x" IS NULL) FROM "schema" AS s0} + end + + test "fragments" do + query = Schema |> select([r], fragment("ltrim(?)", r.x)) |> normalize + assert all(query) == ~s{SELECT ltrim(s0."x") FROM "schema" AS s0} + + value = 13 + query = Schema |> select([r], fragment("ltrim(?, ?)", r.x, ^value)) |> normalize + assert all(query) == ~s{SELECT ltrim(s0."x", ?1) FROM "schema" AS s0} + + query = Schema |> select([], fragment(title: 2)) |> normalize + + assert_raise Ecto.QueryError, + ~r"SQLite adapter does not support keyword or interpolated fragments", + fn -> + all(query) + end + end + + test "literals" do + query = "schema" |> where(foo: true) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 1)} + + query = "schema" |> where(foo: false) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 0)} + + query = "schema" |> where(foo: "abc") |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 'abc')} + + query = "schema" |> where(foo: <<0, ?a, ?b, ?c>>) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = X'00616263')} + + query = "schema" |> where(foo: 123) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 123)} + + query = "schema" |> where(foo: 123.0) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 WHERE (s0."foo" = 123.0)} + end + + test "tagged type" do + query = + Schema + |> select([], type(^"601d74e4-a8d3-4b6e-8365-eddb4c893327", Ecto.UUID)) + |> normalize + + assert all(query) == ~s{SELECT CAST (?1 AS TEXT) FROM "schema" AS s0} + + assert_raise ArgumentError, "Array type is not supported by SQLite", fn -> + query = Schema |> select([], type(^[1, 2, 3], {:array, :integer})) |> normalize + all(query) + end + end + + test "nested expressions" do + z = 123 + + query = + from(r in Schema, []) + |> select([r], (r.x > 0 and r.y > ^(-z)) or true) + |> normalize + + assert all(query) == + ~s{SELECT ((s0."x" > 0) AND (s0."y" > ?1)) OR 1 FROM "schema" AS s0} + end + + test "in expression" do + query = Schema |> select([e], 1 in []) |> normalize + assert all(query) == ~s{SELECT 1 IN () FROM "schema" AS s0} + + query = Schema |> select([e], 1 in [1, e.x, 3]) |> normalize + assert all(query) == ~s{SELECT 1 IN (1,s0."x",3) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in ^[]) |> normalize + assert all(query) == ~s{SELECT 1 IN () FROM "schema" AS s0} + + query = Schema |> select([e], 1 in ^[1, 2, 3]) |> normalize + assert all(query) == ~s{SELECT 1 IN (?1,?2,?3) FROM "schema" AS s0} + + query = Schema |> select([e], 1 in [1, ^2, 3]) |> normalize + assert all(query) == ~s{SELECT 1 IN (1,?1,3) FROM "schema" AS s0} + + query = Schema |> select([e], ^1 in [1, ^2, 3]) |> normalize + assert all(query) == ~s{SELECT ?1 IN (1,?2,3) FROM "schema" AS s0} + + query = Schema |> select([e], ^1 in ^[1, 2, 3]) |> normalize + assert 
all(query) == ~s{SELECT ?1 IN (?2,?3,?4) FROM "schema" AS s0} + + # query = Schema |> select([e], 1 in e.w) |> normalize + # assert all(query) == ~s{SELECT 1 = ANY(s0."w") FROM "schema" AS s0} + # This assertion omitted because we can't support array values. + + query = Schema |> select([e], 1 in fragment("foo")) |> normalize + assert all(query) == ~s{SELECT 1 IN (foo) FROM "schema" AS s0} + end + + test "having" do + query = Schema |> having([p], p.x == p.x) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} + + query = + Schema + |> having([p], p.x == p.x) + |> having([p], p.y == p.y) + |> select([], true) + |> normalize + + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") AND (s0."y" = s0."y")} + end + + test "or_having" do + query = Schema |> or_having([p], p.x == p.x) |> select([], true) |> normalize + assert all(query) == ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x")} + + query = + Schema + |> or_having([p], p.x == p.x) + |> or_having([p], p.y == p.y) + |> select([], true) + |> normalize + + assert all(query) == + ~s{SELECT 1 FROM "schema" AS s0 HAVING (s0."x" = s0."x") OR (s0."y" = s0."y")} + end + + test "group by" do + query = Schema |> group_by([r], r.x) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x"} + + query = Schema |> group_by([r], 2) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY 2} + + query = Schema |> group_by([r], [r.x, r.y]) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0 GROUP BY s0."x", s0."y"} + + query = Schema |> group_by([r], []) |> select([r], r.x) |> normalize + assert all(query) == ~s{SELECT s0."x" FROM "schema" AS s0} + end + + test "arrays and sigils" do + assert_raise ArgumentError, "Array values are not supported by SQLite", fn -> + query = Schema |> select([], fragment("?", [1, 2, 3])) |> normalize + all(query) + end + + assert_raise ArgumentError, "Array values are not supported by SQLite", fn -> + query = Schema |> select([], fragment("?", ~w(abc def))) |> normalize + all(query) + end + end + + test "interpolated values" do + cte1 = + "schema1" |> select([m], %{id: m.id, smth: ^true}) |> where([], fragment("?", ^1)) + + union = "schema1" |> select([m], {m.id, ^true}) |> where([], fragment("?", ^5)) + union_all = "schema2" |> select([m], {m.id, ^false}) |> where([], fragment("?", ^6)) + + query = + Schema + |> with_cte("cte1", as: ^cte1) + |> with_cte("cte2", as: fragment("SELECT * FROM schema WHERE ?", ^2)) + |> select([m], {m.id, ^0}) + |> join(:inner, [], Schema2, on: fragment("?", ^true)) + |> join(:inner, [], Schema2, on: fragment("?", ^false)) + |> where([], fragment("?", ^true)) + |> where([], fragment("?", ^false)) + |> having([], fragment("?", ^true)) + |> having([], fragment("?", ^false)) + |> group_by([], fragment("?", ^3)) + |> group_by([], fragment("?", ^4)) + |> union(^union) + |> union_all(^union_all) + |> order_by([], fragment("?", ^7)) + |> limit([], ^8) + |> offset([], ^9) + |> plan() + + result = + "WITH cte1 AS (SELECT s0.id AS id, ? AS smth FROM schema1 AS s0 WHERE (?)), " <> + "cte2 AS (SELECT * FROM schema WHERE ?) " <> + "SELECT s0.id, ? FROM schema AS s0 INNER JOIN schema2 AS s1 ON ? " <> + "INNER JOIN schema2 AS s2 ON ? WHERE (?) AND (?) " <> + "GROUP BY ?, ? HAVING (?) AND (?) " <> + "UNION (SELECT s0.id, ? 
FROM schema1 AS s0 WHERE (?)) " <> + "UNION ALL (SELECT s0.id, ? FROM schema2 AS s0 WHERE (?)) " <> + "ORDER BY ? LIMIT ? OFFSET ?" + + assert all(query) == String.trim(result) + end + + test "fragments and types" do + query = + normalize( + from(e in "schema", + where: + fragment( + "extract(? from ?) = ?", + ^"month", + e.start_time, + type(^"4", :integer) + ), + where: + fragment( + "extract(? from ?) = ?", + ^"year", + e.start_time, + type(^"2015", :integer) + ), + select: true + ) + ) + + result = + "SELECT 1 FROM \"schema\" AS s0 " <> + "WHERE (extract(?1 from s0.\"start_time\") = ?2) " <> + "AND (extract(?3 from s0.\"start_time\") = ?4)" + + assert all(query) == String.trim(result) + end + + test "fragments allow ? to be escaped with backslash" do + query = + normalize( + from(e in "schema", + where: fragment("? = \"query\\?\"", e.start_time), + select: true + ) + ) + + result = + "SELECT 1 FROM \"schema\" AS s0 " <> + "WHERE (s0.\"start_time\" = \"query?\")" + + assert all(query) == String.trim(result) + end + + ## *_all + + test "update all" do + query = from(m in Schema, update: [set: [x: 0]]) |> normalize(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" SET "x" = 0} + + query = + from(m in Schema, update: [set: [x: 0], inc: [y: 1, z: -3]]) + |> normalize(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" SET "x" = 0, "y" = "schema"."y" + 1, "z" = "schema"."z" + -3} + + query = + from(e in Schema, where: e.x == 123, update: [set: [x: 0]]) + |> normalize(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" SET "x" = 0 WHERE ("schema"."x" = 123)} + + query = from(m in Schema, update: [set: [x: ^0]]) |> normalize(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" SET "x" = ?1} + + # assert_raise ArgumentError, + # "JOINS are not supported on UPDATE statements by SQLite", + # fn -> + # query = + # Schema + # |> join(:inner, [p], q in Schema2, p.x == q.z) + # |> update([_], set: [x: 0]) + # |> normalize(:update_all) + + # update_all(query) + # end + + # assert_raise ArgumentError, + # "JOINS are not supported on UPDATE statements by SQLite", + # fn -> + # query = + # from(e in Schema, + # where: e.x == 123, + # update: [set: [x: 0]], + # join: q in Schema2, + # on: e.x == q.z + # ) + # |> normalize(:update_all) + + # update_all(query) + # end + end + + test "update all with returning" do + query = + from(m in Schema, update: [set: [x: 0]]) + |> select([m], m) + |> normalize(:update_all) + + assert update_all(query) == + ~s{UPDATE "schema" SET "x" = 0 ;--RETURNING ON UPDATE "schema","id","x","y","z"} + + # diff SQLite syntax + end + + test "update all array ops" do + assert_raise ArgumentError, "Array operations are not supported by SQLite", fn -> + query = + from(m in SchemaWithArray, update: [push: [w: 0]]) |> normalize(:update_all) + + update_all(query) + end + + assert_raise ArgumentError, "Array operations are not supported by SQLite", fn -> + query = + from(m in SchemaWithArray, update: [pull: [w: 0]]) |> normalize(:update_all) + + update_all(query) + end + end + + # new don't know what to expect + test "update all with prefix" do + query = from(m in Schema, update: [set: [x: 0]]) |> normalize(:update_all) + + assert update_all(%{query | prefix: "prefix"}) == + ~s{UPDATE "prefix"."schema" SET "x" = 0} + end + + test "delete all" do + query = Schema |> Queryable.to_query() |> normalize + assert delete_all(query) == ~s{DELETE FROM "schema"} + + query = from(e in Schema, where: e.x == 123) |> normalize + + assert 
delete_all(query) == + ~s{DELETE FROM "schema" WHERE ("schema"."x" = 123)} + + # assert_raise ArgumentError, + # "JOINS are not supported on DELETE statements by SQLite", + # fn -> + # query = + # Schema |> join(:inner, [p], q in Schema2, p.x == q.z) |> normalize + + # delete_all(query) + # end + + # assert_raise ArgumentError, + # "JOINS are not supported on DELETE statements by SQLite", + # fn -> + # query = + # from(e in Schema, + # where: e.x == 123, + # join: q in Schema2, + # on: e.x == q.z + # ) + # |> normalize + + # delete_all(query) + # end + + # assert_raise ArgumentError, + # "JOINS are not supported on DELETE statements by SQLite", + # fn -> + # query = + # from(e in Schema, + # where: e.x == 123, + # join: assoc(e, :comments), + # join: assoc(e, :permalink) + # ) + # |> normalize + + # delete_all(query) + # end + end + + test "delete all with returning" do + query = Schema |> Queryable.to_query() |> select([m], m) |> normalize + + assert delete_all(query) == + ~s{DELETE FROM "schema" ;--RETURNING ON DELETE "schema","id","x","y","z"} + end + + test "delete all with prefix" do + query = Schema |> Queryable.to_query() |> normalize + assert delete_all(%{query | prefix: "prefix"}) == ~s{DELETE FROM "prefix"."schema"} + end + + ## + ## Partitions and windows + ## + + describe "windows" do + test "one window" do + query = + Schema + |> select([r], r.x) + |> windows([r], w: [partition_by: r.x]) + |> plan + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 WINDOW w AS (PARTITION BY s0.x)} + end + + test "two windows" do + query = + Schema + |> select([r], r.x) + |> windows([r], w1: [partition_by: r.x], w2: [partition_by: r.y]) + |> plan() + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 WINDOW w1 AS (PARTITION BY s0.x), w2 AS (PARTITION BY s0.y)} + end + + test "count over window" do + query = + Schema + |> windows([r], w: [partition_by: r.x]) + |> select([r], count(r.x) |> over(:w)) + |> plan() + + assert all(query) == + ~s{SELECT count(s0.x) OVER w FROM schema AS s0 WINDOW w AS (PARTITION BY s0.x)} + end + + test "count over all" do + query = + Schema + |> select([r], count(r.x) |> over) + |> plan() + + assert all(query) == ~s{SELECT count(s0.x) OVER () FROM schema AS s0} + end + + test "row_number over all" do + query = + Schema + |> select(row_number |> over) + |> plan() + + assert all(query) == ~s{SELECT row_number() OVER () FROM schema AS s0} + end + + test "nth_value over all" do + query = + Schema + |> select([r], nth_value(r.x, 42) |> over) + |> plan() + + assert all(query) == ~s{SELECT nth_value(s0.x, 42) OVER () FROM schema AS s0} + end + + test "lag/2 over all" do + query = + Schema + |> select([r], lag(r.x, 42) |> over) + |> plan() + + assert all(query) == ~s{SELECT lag(s0.x, 42) OVER () FROM schema AS s0} + end + + test "custom aggregation over all" do + query = + Schema + |> select([r], fragment("custom_function(?)", r.x) |> over) + |> plan() + + assert all(query) == ~s{SELECT custom_function(s0.x) OVER () FROM schema AS s0} + end + + test "partition by and order by on window" do + query = + Schema + |> windows([r], w: [partition_by: [r.x, r.z], order_by: r.x]) + |> select([r], r.x) + |> plan() + + assert all(query) == + ~s{SELECT s0.x FROM schema AS s0 WINDOW w AS (PARTITION BY s0.x, s0.z ORDER BY s0.x)} + end + + test "partition by and order by on over" do + query = + Schema + |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x)) + + query = query |> plan() + + assert all(query) == + ~s{SELECT count(s0.x) OVER (PARTITION BY 
s0.x, s0.z ORDER BY s0.x) FROM schema AS s0} + end + + test "frame clause" do + query = + Schema + |> select( + [r], + count(r.x) + |> over( + partition_by: [r.x, r.z], + order_by: r.x, + frame: fragment("ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING") + ) + ) + + query = query |> plan() + + assert all(query) == + ~s{SELECT count(s0.x) OVER (PARTITION BY s0.x, s0.z ORDER BY s0.x ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) FROM schema AS s0} + end + end + + ## + ## Joins + ## + + test "join" do + query = + Schema + |> join(:inner, [p], q in Schema2, on: p.x == q.z) + |> select([], true) + |> plan() + + assert all(query) == + ~s{SELECT TRUE FROM schema AS s0 INNER JOIN schema2 AS s1 ON s0.x = s1.z} + + query = + Schema + |> join(:inner, [p], q in Schema2, on: p.x == q.z) + |> join(:inner, [], Schema, on: true) + |> select([], true) + |> plan() + + assert all(query) == + ~s{SELECT TRUE FROM schema AS s0 INNER JOIN schema2 AS s1 ON s0.x = s1.z } <> + ~s{INNER JOIN schema AS s2 ON TRUE} + end + + test "join with hints" do + assert Schema + |> join(:inner, [p], q in Schema2, hints: ["USE INDEX FOO", "USE INDEX BAR"]) + |> select([], true) + |> plan() + |> all() == + ~s{SELECT TRUE FROM schema AS s0 INNER JOIN schema2 AS s1 USE INDEX FOO USE INDEX BAR ON TRUE} + end + + test "join with nothing bound" do + query = + Schema + |> join(:inner, [], q in Schema2, on: q.z == q.z) + |> select([], true) + |> plan() + + assert all(query) == + ~s{SELECT TRUE FROM schema AS s0 INNER JOIN schema2 AS s1 ON s1.z = s1.z} + end + + test "join without schema" do + query = + "posts" + |> join(:inner, [p], q in "comments", on: p.x == q.z) + |> select([], true) + |> plan() + + assert all(query) == + ~s{SELECT TRUE FROM posts AS p0 INNER JOIN comments AS c1 ON p0.x = c1.z} + end + + test "join with subquery" do + posts = + subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, y: r.y})) + + query = + "comments" + |> join(:inner, [c], p in subquery(posts), on: true) + |> select([_, p], p.x) + |> plan() + + assert all(query) == + ~s{SELECT s1.x FROM comments AS c0 } <> + ~s{INNER JOIN (SELECT sp0.x AS x, sp0.y AS y FROM posts AS sp0 WHERE (sp0.title = ?)) AS s1 ON TRUE} + + posts = + subquery("posts" |> where(title: ^"hello") |> select([r], %{x: r.x, z: r.y})) + + query = + "comments" + |> join(:inner, [c], p in subquery(posts), on: true) + |> select([_, p], p) + |> plan() + + assert all(query) == + ~s{SELECT s1.x, s1.z FROM comments AS c0 } <> + ~s{INNER JOIN (SELECT sp0.x AS x, sp0.y AS z FROM posts AS sp0 WHERE (sp0.title = ?)) AS s1 ON TRUE} + + posts = + subquery( + "posts" + |> where(title: parent_as(:comment).subtitle) + |> select([r], r.title) + ) + + query = + "comments" + |> from(as: :comment) + |> join(:inner, [c], p in subquery(posts)) + |> select([_, p], p) + |> plan() + + assert all(query) == + "SELECT s1.title FROM comments AS c0 " <> + "INNER JOIN (SELECT sp0.title AS title FROM posts AS sp0 WHERE (sp0.title = c0.subtitle)) AS s1 ON TRUE" + end + + test "join with prefix" do + query = + Schema + |> join(:inner, [p], q in Schema2, on: p.x == q.z) + |> select([], true) + |> Map.put(:prefix, "prefix") + |> plan() + + assert all(query) == + ~s|SELECT TRUE FROM prefix.schema AS s0 INNER JOIN prefix.schema2 AS s1 ON s0.x = s1.z| + + query = + Schema + |> from(prefix: "first") + |> join(:inner, [p], q in Schema2, on: p.x == q.z, prefix: "second") + |> select([], true) + |> Map.put(:prefix, "prefix") + |> plan() + + assert all(query) == + ~s|SELECT TRUE FROM first.schema AS s0 INNER JOIN second.schema2 AS 
s1 ON s0.x = s1.z| + end + + test "join with fragment" do + query = + Schema + |> join( + :inner, + [p], + q in fragment( + "SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?", + p.x, + ^10 + ) + ) + |> select([p], {p.id, ^0}) + |> where([p], p.id > 0 and p.id < ^100) + |> plan() + + assert all(query) == + ~s|SELECT s0.id, ? FROM schema AS s0 INNER JOIN (SELECT * FROM schema2 AS s2 WHERE s2.id = s0.x AND s2.field = ?) AS f1 ON TRUE WHERE ((s0.id > 0) AND (s0.id < ?))| + end + + test "join with fragment and on defined" do + query = + Schema + |> join(:inner, [p], q in fragment("SELECT * FROM schema2"), on: q.id == p.id) + |> select([p], {p.id, ^0}) + |> plan() + + assert all(query) == + ~s|SELECT s0.id, ? FROM schema AS s0 INNER JOIN (SELECT * FROM schema2) AS f1 ON f1.id = s0.id| + end + + test "join with query interpolation" do + inner = Ecto.Queryable.to_query(Schema2) + query = from(p in Schema, left_join: c in ^inner, select: {p.id, c.id}) |> plan() + + assert all(query) == + ~s|SELECT s0.id, s1.id FROM schema AS s0 LEFT OUTER JOIN schema2 AS s1 ON TRUE| + end + + test "cross join" do + query = from(p in Schema, cross_join: c in Schema2, select: {p.id, c.id}) |> plan() + + assert all(query) == + ~s|SELECT s0.id, s1.id FROM schema AS s0 CROSS JOIN schema2 AS s1| + end + + test "join produces correct bindings" do + query = from(p in Schema, join: c in Schema2, on: true) + query = from(p in query, join: c in Schema2, on: true, select: {p.id, c.id}) + query = plan(query) + + assert all(query) == + ~s|SELECT s0.id, s2.id FROM schema AS s0 INNER JOIN schema2 AS s1 ON TRUE INNER JOIN schema2 AS s2 ON TRUE| + end + + ## Associations + + test "association join belongs_to" do + query = + Schema2 + |> join(:inner, [c], p in assoc(c, :post)) + |> select([], true) + |> normalize + + assert all(query) == + ~s|SELECT 1 FROM schema2 AS s0 INNER JOIN schema AS s1 ON s1.x = s0.z| + end + + test "association join has_many" do + query = + Schema + |> join(:inner, [p], c in assoc(p, :comments)) + |> select([], true) + |> normalize + + assert all(query) == + ~s|SELECT 1 FROM schema AS s0 INNER JOIN schema2 AS s1 ON s1.z = s0.x| + end + + test "association join has_one" do + query = + Schema + |> join(:inner, [p], pp in assoc(p, :permalink)) + |> select([], true) + |> normalize + + assert all(query) == + ~s|SELECT 1 FROM schema AS s0 INNER JOIN schema3 AS s1 ON s1.id = s0.y| + end + + # Schema based + + test "insert" do + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [:id]) + + assert query == + ~s{INSERT INTO schema (x,y) VALUES (?1,?2)} + + assert_raise ArgumentError, + "Cell-wise default values are not supported on INSERT statements by SQLite", + fn -> + insert( + nil, + "schema", + [:x, :y], + [[:x, :y], [nil, :z]], + {:raise, [], []}, + [:id] + ) + end + + query = insert(nil, "schema", [], [[]], {:raise, [], []}, [:id]) + + assert query == ~s{INSERT INTO schema DEFAULT VALUES} + + query = insert(nil, "schema", [], [[]], {:raise, [], []}, []) + assert query == ~s{INSERT INTO "schema" DEFAULT VALUES} + + query = insert("prefix", "schema", [], [[]], {:raise, [], []}, [:id]) + + assert query == + ~s{INSERT INTO prefix.schema DEFAULT VALUES} + + query = insert("prefix", "schema", [], [[]], {:raise, [], []}, []) + assert query == ~s{INSERT INTO prefix.schema DEFAULT VALUES} + end + + test "insert with on conflict" do + # These tests are adapted from the Postgres Adaptor + + # For :nothing + query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], []}, []) + + 
assert query == ~s{INSERT INTO schema (x,y) VALUES (?1,?2) ON CONFLICT DO NOTHING}
+
+ query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], [:x, :y]}, [])
+
+ assert query ==
+ ~s{INSERT INTO schema (x,y) VALUES (?1,?2) ON CONFLICT (x,y) DO NOTHING}
+
+ # For :update
+ update = from("schema", update: [set: [z: "foo"]]) |> normalize(:update_all)
+ query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
+
+ assert query ==
+ ~s{INSERT INTO "schema" (x,y) VALUES (?1,?2) ON CONFLICT (x,y) DO UPDATE SET z = 'foo'}
+
+ update =
+ from("schema", update: [set: [z: ^"foo"]], where: [w: true])
+ |> normalize(:update_all, 2)
+
+ query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
+
+ assert query ==
+ ~s{INSERT INTO schema (x,y) VALUES (?1,?2) ON CONFLICT (x,y) DO UPDATE SET z = ?3 WHERE (schema.w = 1)}
+
+ update = normalize(from("schema", update: [set: [z: "foo"]]), :update_all)
+ query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
+
+ assert query ==
+ ~s{INSERT INTO schema (x,y) VALUES (?1,?2) ON CONFLICT (x,y) DO UPDATE SET z = 'foo'}
+
+ update =
+ normalize(
+ from("schema", update: [set: [z: ^"foo"]], where: [w: true]),
+ :update_all,
+ 2
+ )
+
+ query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
+
+ assert query ==
+ ~s{INSERT INTO "schema" (x,y) VALUES (?1,?2) ON CONFLICT (x,y) DO UPDATE SET z = ?3 WHERE (schema.w = 1)}
+
+ # For :replace_all
+ assert_raise ArgumentError, "Upsert in SQLite requires :conflict_target", fn ->
+ conflict_target = []
+
+ insert(
+ nil,
+ "schema",
+ [:x, :y],
+ [[:x, :y]],
+ {:replace_all, [], conflict_target},
+ []
+ )
+ end
+
+ assert_raise ArgumentError, "Upsert in SQLite does not support ON CONSTRAINT", fn ->
+ insert(
+ nil,
+ "schema",
+ [:x, :y],
+ [[:x, :y]],
+ {:replace_all, [], {:constraint, :foo}},
+ []
+ )
+ end
+
+ query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], [:id]}, [])
+
+ assert query ==
+ ~s{INSERT INTO schema (x,y) VALUES (?1,?2) ON CONFLICT (id) DO UPDATE SET x = EXCLUDED.x,y = EXCLUDED.y}
+ end
+
+ test "update" do
+ query = update(nil, "schema", [:x, :y], [:id], [])
+ assert query == ~s{UPDATE schema SET x = ?1, y = ?2 WHERE id = ?3}
+
+ query = update(nil, "schema", [:x, :y], [:id], [:z])
+
+ assert query == ~s{UPDATE schema SET x = ?1, y = ?2 WHERE id = ?3}
+
+ query = update("prefix", "schema", [:x, :y], [:id], [:x, :z])
+
+ assert query == ~s{UPDATE prefix.schema SET x = ?1, y = ?2 WHERE id = ?3}
+
+ query = update("prefix", "schema", [:x, :y], [:id], [])
+ assert query == ~s{UPDATE prefix.schema SET x = ?1, y = ?2 WHERE id = ?3}
+ end
+
+ test "delete" do
+ query = delete(nil, "schema", [:x, :y], [])
+ assert query == ~s{DELETE FROM schema WHERE x = ?1 AND y = ?2}
+
+ query = delete(nil, "schema", [:x, :y], [:z])
+
+ assert query ==
+ ~s{DELETE FROM schema WHERE x = ?1 AND y = ?2}
+
+ query = delete("prefix", "schema", [:x, :y], [:z])
+
+ assert query ==
+ ~s{DELETE FROM prefix.schema WHERE x = ?1 AND y = ?2}
+
+ query = delete(nil, "schema", [:x, :y], [])
+ assert query == ~s{DELETE FROM schema WHERE x = ?1 AND y = ?2}
+
+ query = delete("prefix", "schema", [:x, :y], [])
+ assert query == ~s{DELETE FROM prefix.schema WHERE x = ?1 AND y = ?2}
+ end
+
+ # DDL
+
+ alias Ecto.Migration.Reference
+
+ import Ecto.Migration,
+ only: [table: 1, table: 2, index: 2, index: 3, constraint: 2, constraint: 3]
+
+ test "executing a string during migration" do
+ assert execute_ddl("example") == 
["example"] + end + + test "keyword list during migration" do + assert_raise ArgumentError, + "SQLite adapter does not support keyword lists in execute", + fn -> + execute_ddl(testing: false) + end + end + + test "create table" do + create = + {:create, table(:posts), + [ + {:add, :name, :string, [default: "Untitled", size: 20, null: false]}, + {:add, :price, :numeric, + [precision: 8, scale: 2, default: {:fragment, "expr"}]}, + {:add, :on_hand, :integer, [default: 0, null: true]}, + {:add, :is_active, :boolean, [default: true]} + ]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE posts (name TEXT DEFAULT 'Untitled' NOT NULL, + price NUMERIC DEFAULT expr, + on_hand INTEGER DEFAULT 0, + is_active BOOLEAN DEFAULT 1) + """ + |> remove_newlines + ] + end + + test "create table invalid default" do + create = + {:create, table(:posts), [{:add, :name, :string, [default: :atoms_not_allowed]}]} + + assert_raise ArgumentError, + ~r"unknown default :atoms_not_allowed for type :string", + fn -> + execute_ddl(create) + end + end + + test "create table array type" do + create = {:create, table(:posts), [{:add, :name, {:array, :numeric}, []}]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE posts (name JSON)| + ] + end + + test "create table illegal options" do + create = + {:create, table(:posts, options: [allowed: :not]), [{:add, :name, :string}]} + + assert_raise ArgumentError, + ~r"SQLite adapter does not support keyword lists in :options", + fn -> + execute_ddl(create) + end + end + + test "create table if not exists" do + create = + {:create_if_not_exists, table(:posts), + [ + {:add, :id, :serial, [primary_key: true]}, + {:add, :title, :string, []}, + {:add, :price, :decimal, [precision: 10, scale: 2]}, + {:add, :created_at, :datetime, []} + ]} + + query = execute_ddl(create) + + assert query == [ + """ + CREATE TABLE IF NOT EXISTS posts (id INTEGER PRIMARY KEY AUTOINCREMENT, + title TEXT, + price DECIMAL(10,2), + created_at DATETIME) + """ + |> remove_newlines + ] + end + + test "create table with prefix" do + create = + {:create, table(:posts, prefix: :foo), + [{:add, :category_0, %Reference{table: :categories}, []}]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE foo.posts (category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES foo.categories(id))| + ] + end + + test "create table with comment on columns and table" do + create = + {:create, table(:posts, comment: "comment"), + [ + {:add, :category_0, %Reference{table: :categories}, + [comment: "column comment"]}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, [comment: "column comment 2"]} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE posts (category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES categories(id), created_at TIMESTAMP, updated_at TIMESTAMP)| + ] + + # NOTE: Comments are not supported by SQLite. DDL query generator will ignore them. + end + + test "create table with comment on table" do + create = + {:create, table(:posts, comment: "table comment"), + [{:add, :category_0, %Reference{table: :categories}, []}]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE posts (category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES categories(id))| + ] + + # NOTE: Comments are not supported by SQLite. DDL query generator will ignore them. 
+ end + + test "create table with comment on columns" do + create = + {:create, table(:posts), + [ + {:add, :category_0, %Reference{table: :categories}, + [comment: "column comment"]}, + {:add, :created_at, :timestamp, []}, + {:add, :updated_at, :timestamp, [comment: "column comment 2"]} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE posts (category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES categories(id), created_at TIMESTAMP, updated_at TIMESTAMP)| + ] + + # NOTE: Comments are not supported by SQLite. DDL query generator will ignore them. + end + + test "create table with references" do + create = + {:create, table(:posts), + [ + {:add, :id, :serial, [primary_key: true]}, + {:add, :category_0, %Reference{table: :categories}, []}, + {:add, :category_1, %Reference{table: :categories, name: :foo_bar}, []}, + {:add, :category_2, %Reference{table: :categories, on_delete: :nothing}, []}, + {:add, :category_3, %Reference{table: :categories, on_delete: :delete_all}, + [null: false]}, + {:add, :category_4, %Reference{table: :categories, on_delete: :nilify_all}, + []}, + {:add, :category_5, %Reference{table: :categories, on_update: :nothing}, []}, + {:add, :category_6, %Reference{table: :categories, on_update: :update_all}, + [null: false]}, + {:add, :category_7, %Reference{table: :categories, on_update: :nilify_all}, + []}, + {:add, :category_8, + %Reference{ + table: :categories, + on_delete: :nilify_all, + on_update: :update_all + }, [null: false]} + ]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE posts (id INTEGER PRIMARY KEY AUTOINCREMENT, + category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES categories(id), + category_1 INTEGER CONSTRAINT foo_bar REFERENCES categories(id), + category_2 INTEGER CONSTRAINT posts_category_2_fkey REFERENCES categories(id), + category_3 INTEGER NOT NULL CONSTRAINT posts_category_3_fkey REFERENCES categories(id) ON DELETE CASCADE, + category_4 INTEGER CONSTRAINT posts_category_4_fkey REFERENCES categories(id) ON DELETE SET NULL, + category_5 INTEGER CONSTRAINT posts_category_5_fkey REFERENCES categories(id), + category_6 INTEGER NOT NULL CONSTRAINT posts_category_6_fkey REFERENCES categories(id) ON UPDATE CASCADE, + category_7 INTEGER CONSTRAINT posts_category_7_fkey REFERENCES categories(id) ON UPDATE SET NULL, + category_8 INTEGER NOT NULL CONSTRAINT posts_category_8_fkey REFERENCES categories(id) ON DELETE SET NULL ON UPDATE CASCADE) + """ + |> remove_newlines + ] + end + + test "create table with references including prefixes" do + create = + {:create, table(:posts, prefix: :foo), + [ + {:add, :id, :serial, [primary_key: true]}, + {:add, :category_0, %Reference{table: :categories}, []}, + {:add, :category_1, %Reference{table: :categories, name: :foo_bar}, []}, + {:add, :category_2, %Reference{table: :categories, on_delete: :nothing}, []}, + {:add, :category_3, %Reference{table: :categories, on_delete: :delete_all}, + [null: false]}, + {:add, :category_4, %Reference{table: :categories, on_delete: :nilify_all}, []} + ]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE foo.posts (id INTEGER PRIMARY KEY AUTOINCREMENT, + category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES foo.categories(id), + category_1 INTEGER CONSTRAINT foo_bar REFERENCES foo.categories(id), + category_2 INTEGER CONSTRAINT posts_category_2_fkey REFERENCES foo.categories(id), + category_3 INTEGER NOT NULL CONSTRAINT posts_category_3_fkey REFERENCES foo.categories(id) ON DELETE CASCADE, + category_4 INTEGER CONSTRAINT 
posts_category_4_fkey REFERENCES foo.categories(id) ON DELETE SET NULL) + """ + |> remove_newlines + ] + end + + test "create table with options" do + create = + {:create, table(:posts, options: "WITHOUT ROWID"), + [{:add, :id, :serial, [primary_key: true]}, {:add, :created_at, :datetime, []}]} + + assert execute_ddl(create) == + [ + ~s|CREATE TABLE posts (id INTEGER PRIMARY KEY AUTOINCREMENT, created_at DATETIME) WITHOUT ROWID| + ] + end + + test "create table with composite key" do + create = + {:create, table(:posts), + [ + {:add, :a, :integer, [primary_key: true]}, + {:add, :b, :integer, [primary_key: true]}, + {:add, :name, :string, []} + ]} + + assert execute_ddl(create) == [ + """ + CREATE TABLE posts (a INTEGER, b INTEGER, name TEXT, PRIMARY KEY (a, b)) + """ + |> remove_newlines + ] + end + + test "create table with bad table name" do + assert_raise ArgumentError, "bad table name \"po\\\"sts\"", fn -> + create = + {:create, table(:"po\"sts"), + [{:add, :id, :serial, [primary_key: true]}, {:add, :created_at, :datetime, []}]} + + execute_ddl(create) + end + end + + test "create table with bad column name" do + assert_raise ArgumentError, "bad field name \"crea\\\"ted_at\"", fn -> + create = + {:create, table(:posts), + [ + {:add, :id, :serial, [primary_key: true]}, + {:add, :"crea\"ted_at", :datetime, []} + ]} + + execute_ddl(create) + end + end + + test "create table with a map column, and an empty map default" do + create = + {:create, table(:posts), + [ + {:add, :a, :map, [default: %{}]} + ]} + + assert execute_ddl(create) == [~s|CREATE TABLE posts ("a" TEXT DEFAULT '{}')|] + end + + test "create table with a map column, and a map default with values" do + create = + {:create, table(:posts), + [ + {:add, :a, :map, [default: %{foo: "bar", baz: "boom"}]} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE posts (a TEXT DEFAULT '{"foo":"bar","baz":"boom"}')| + ] + end + + test "create table with a map column, and a string default" do + create = + {:create, table(:posts), + [ + {:add, :a, :map, [default: ~s|{"foo":"bar","baz":"boom"}|]} + ]} + + assert execute_ddl(create) == [ + ~s|CREATE TABLE posts (a TEXT DEFAULT '{"foo":"bar","baz":"boom"}')| + ] + end + + test "drop table" do + drop = {:drop, table(:posts)} + assert execute_ddl(drop) == [~s|DROP TABLE posts|] + end + + test "drop table if exists" do + assert execute_ddl({:drop_if_exists, %Table{name: "posts"}}) == [ + ~s|DROP TABLE IF EXISTS posts| + ] + end + + test "drop table with prefix" do + drop = {:drop, table(:posts, prefix: :foo)} + assert execute_ddl(drop) == [~s|DROP TABLE foo.posts|] + end + + test "alter table" do + alter = + {:alter, table(:posts), + [ + {:add, :title, :string, [default: "Untitled", size: 100, null: false]}, + {:add, :author_id, %Reference{table: :author}, []} + ]} + + assert execute_ddl(alter) == [ + remove_newlines( + ~s|ALTER TABLE posts ADD COLUMN title TEXT DEFAULT 'Untitled' NOT NULL| + ), + remove_newlines( + ~s|ALTER TABLE posts ADD COLUMN author_id INTEGER CONSTRAINT posts_author_id_fkey REFERENCES author(id)| + ) + ] + end + + test "alter table with datetime not null" do + alter = + {:alter, table(:posts), + [ + {:add, :title, :string, [default: "Untitled", size: 100, null: false]}, + {:add, :when, :utc_datetime, [null: false]} + ]} + + assert execute_ddl(alter) == [ + remove_newlines( + ~s|ALTER TABLE posts ADD COLUMN title TEXT DEFAULT 'Untitled' NOT NULL| + ), + remove_newlines(~s|ALTER TABLE posts ADD COLUMN when UTC_DATETIME|) + ] + end + + test "alter table with prefix" do + 
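+ # A table prefix renders as a schema qualifier (foo.posts); SQLite resolves
+ # it against a database ATTACHed under that name.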
alter = + {:alter, table(:posts, prefix: :foo), + [ + {:add, :title, :string, [default: "Untitled", size: 100, null: false]}, + {:add, :author_id, %Reference{table: :author}, []} + ]} + + assert execute_ddl(alter) == [ + ~s|ALTER TABLE foo.posts ADD COLUMN title TEXT DEFAULT 'Untitled' NOT NULL|, + ~s|ALTER TABLE foo.posts ADD COLUMN author_id INTEGER CONSTRAINT posts_author_id_fkey REFERENCES foo.author(id)| + ] + end + + test "alter column errors for :modify column" do + alter = + {:alter, table(:posts), [{:modify, :price, :numeric, [precision: 8, scale: 2]}]} + + assert_raise ArgumentError, "ALTER COLUMN not supported by SQLite", fn -> + execute_ddl(alter) + end + end + + test "alter column errors for :remove column" do + alter = + {:alter, table(:posts), [{:remove, :price, :numeric, [precision: 8, scale: 2]}]} + + assert_raise ArgumentError, "ALTER COLUMN not supported by SQLite", fn -> + execute_ddl(alter) + end + end + + test "alter table with primary key" do + alter = {:alter, table(:posts), [{:add, :my_pk, :serial, [primary_key: true]}]} + + assert execute_ddl(alter) == [ + ~s|ALTER TABLE posts ADD COLUMN my_pk INTEGER PRIMARY KEY AUTOINCREMENT| + ] + end + + test "create index" do + create = {:create, index(:posts, [:category_id, :permalink])} + + assert execute_ddl(create) == + [ + ~s|CREATE INDEX posts_category_id_permalink_index ON posts (category_id, permalink)| + ] + + create = {:create, index(:posts, ["lower(permalink)"], name: "postsmain")} + + assert execute_ddl(create) == + [~s|CREATE INDEX postsmain ON posts (lower(permalink))|] + end + + test "create index if not exists" do + create = {:create_if_not_exists, index(:posts, [:category_id, :permalink])} + query = execute_ddl(create) + + assert query == [ + ~s|CREATE INDEX IF NOT EXISTS posts_category_id_permalink_index ON posts (category_id, permalink)| + ] + end + + test "create index with prefix" do + create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)} + + assert execute_ddl(create) == + [ + ~s|CREATE INDEX posts_category_id_permalink_index ON foo.posts (category_id, permalink)| + ] + + create = + {:create, index(:posts, ["lower(permalink)"], name: "postsmain", prefix: :foo)} + + assert execute_ddl(create) == + [~s|CREATE INDEX postsmain ON foo.posts (lower(permalink))|] + end + + test "create index with comment" do + create = + {:create, + index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")} + + assert execute_ddl(create) == [ + remove_newlines(""" + CREATE INDEX posts_category_id_permalink_index ON foo.posts (category_id, permalink) + """) + ] + + # NOTE: Comments are not supported by SQLite. DDL query generator will ignore them. 
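+ # As with table comments above, SQLite offers no way to attach a comment
+ # to an index, so the option is accepted and discarded.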
+ end + + test "create unique index" do + create = {:create, index(:posts, [:permalink], unique: true)} + + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX posts_permalink_index ON posts (permalink)|] + end + + test "create unique index if not exists" do + create = {:create_if_not_exists, index(:posts, [:permalink], unique: true)} + query = execute_ddl(create) + + assert query == [ + ~s|CREATE UNIQUE INDEX IF NOT EXISTS posts_permalink_index ON posts (permalink)| + ] + end + + test "create unique index with condition" do + create = {:create, index(:posts, [:permalink], unique: true, where: "public IS 1")} + + assert execute_ddl(create) == + [ + ~s|CREATE UNIQUE INDEX posts_permalink_index ON posts (permalink) WHERE public IS 1| + ] + + create = {:create, index(:posts, [:permalink], unique: true, where: :public)} + + assert execute_ddl(create) == + [ + ~s|CREATE UNIQUE INDEX posts_permalink_index ON posts (permalink) WHERE public| + ] + end + + test "create index concurrently" do + # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL. + create = {:create, index(:posts, [:permalink], concurrently: true)} + + assert execute_ddl(create) == + [~s|CREATE INDEX posts_permalink_index ON posts (permalink)|] + end + + test "create unique index concurrently" do + # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL. + create = {:create, index(:posts, [:permalink], concurrently: true, unique: true)} + + assert execute_ddl(create) == + [~s|CREATE UNIQUE INDEX posts_permalink_index ON posts (permalink)|] + end + + test "create an index using a different type" do + # NOTE: SQLite doesn't support USING, so this isn't included in generated SQL. + create = {:create, index(:posts, [:permalink], using: :hash)} + + assert execute_ddl(create) == + [~s|CREATE INDEX posts_permalink_index ON posts (permalink)|] + end + + test "drop index" do + drop = {:drop, index(:posts, [:id], name: "postsmain")} + assert execute_ddl(drop) == [~s|DROP INDEX postsmain|] + end + + test "drop index with prefix" do + drop = {:drop, index(:posts, [:id], name: "postsmain", prefix: :foo)} + assert execute_ddl(drop) == [~s|DROP INDEX foo.postsmain|] + end + + test "drop index if exists" do + drop = {:drop_if_exists, index(:posts, [:id], name: "postsmain")} + assert execute_ddl(drop) == [~s|DROP INDEX IF EXISTS postsmain|] + end + + test "drop index concurrently" do + # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL. 
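+ # CONCURRENTLY is a PostgreSQL extension with no SQLite equivalent.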
+    drop = {:drop, index(:posts, [:id], name: "postsmain", concurrently: true)}
+    assert execute_ddl(drop) == [~s|DROP INDEX postsmain|]
+  end
+
+  test "create check constraint" do
+    create =
+      {:create, constraint(:products, "price_must_be_positive", check: "price > 0")}
+
+    assert_raise ArgumentError,
+                 "SQLite3 adapter does not support check constraints",
+                 fn ->
+                   execute_ddl(create)
+                 end
+
+    create =
+      {:create,
+       constraint(:products, "price_must_be_positive", check: "price > 0", prefix: "foo")}
+
+    assert_raise ArgumentError,
+                 "SQLite3 adapter does not support check constraints",
+                 fn ->
+                   execute_ddl(create)
+                 end
+  end
+
+  test "create exclusion constraint" do
+    create =
+      {:create,
+       constraint(:products, "price_must_be_positive",
+         exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|
+       )}
+
+    assert_raise ArgumentError,
+                 "SQLite3 adapter does not support exclusion constraints",
+                 fn ->
+                   execute_ddl(create)
+                 end
+  end
+
+  test "create constraint with comment" do
+    assert_raise ArgumentError,
+                 "SQLite3 adapter does not support check constraints",
+                 fn ->
+                   create =
+                     {:create,
+                      constraint(:products, "price_must_be_positive",
+                        check: "price > 0",
+                        prefix: "foo",
+                        comment: "comment"
+                      )}
+
+                   execute_ddl(create)
+                 end
+  end
+
+  test "drop constraint" do
+    drop = {:drop, constraint(:products, "price_must_be_positive")}
+
+    assert_raise ArgumentError,
+                 "SQLite3 adapter does not support constraints",
+                 fn ->
+                   execute_ddl(drop)
+                 end
+
+    drop = {:drop, constraint(:products, "price_must_be_positive", prefix: "foo")}
+
+    assert_raise ArgumentError,
+                 "SQLite3 adapter does not support constraints",
+                 fn ->
+                   execute_ddl(drop)
+                 end
+  end
+
+  test "rename table" do
+    rename = {:rename, table(:posts), table(:new_posts)}
+    assert execute_ddl(rename) == [~s|ALTER TABLE posts RENAME TO new_posts|]
+  end
+
+  test "rename table with prefix" do
+    rename = {:rename, table(:posts, prefix: :foo), table(:new_posts, prefix: :foo)}
+    assert execute_ddl(rename) == [~s|ALTER TABLE foo.posts RENAME TO foo.new_posts|]
+  end
+
+  test "rename column" do
+    rename = {:rename, table(:posts), :given_name, :first_name}
+
+    assert execute_ddl(rename) == [
+             ~s|ALTER TABLE posts RENAME COLUMN given_name TO first_name|
+           ]
+  end
+
+  test "rename column in prefixed table" do
+    rename = {:rename, table(:posts, prefix: :foo), :given_name, :first_name}
+
+    assert execute_ddl(rename) == [
+             ~s|ALTER TABLE foo.posts RENAME COLUMN given_name TO first_name|
+           ]
+  end
+
+  # test "drop column errors" do
+  #   alter = {:alter, table(:posts), [{:remove, :summary}]}
+
+  #   assert_raise ArgumentError, "DROP COLUMN not supported by SQLite", fn ->
+  #     execute_ddl(alter)
+  #   end
+  # end
+
+  test "datetime_add with microsecond" do
+    assert_raise ArgumentError,
+                 "SQLite does not support microsecond precision in datetime intervals",
+                 fn ->
+                   TestRepo.all(
+                     from(p in Post,
+                       select: datetime_add(p.inserted_at, 1500, "microsecond")
+                     )
+                   )
+                 end
+  end
+
+  # test "stream error handling" do
+  #   opts = [database: ":memory:", backoff_type: :stop]
+  #   {:ok, pid} = DBConnection.start_link(Exqlite.Protocol, opts)
+
+  #   query = %Exqlite.Query{name: "", statement: "CREATE TABLE uniques (a int UNIQUE)"}
+  #   {:ok, _, _} = DBConnection.prepare_execute(pid, query, [])
+
+  #   query = %Exqlite.Query{name: "", statement: "INSERT INTO uniques VALUES(1)"}
+  #   {:ok, _, _} = DBConnection.prepare_execute(pid, query, [])
+
+  #   assert_raise Exqlite.Error, "UNIQUE constraint failed: uniques.a", fn ->
+  #     pid
+  #     |> SQL.stream("INSERT INTO uniques VALUES(1)", [], [])
+  #     |> Enum.to_list()
+  #   end
+  # end
+end
diff --git a/test/support/case.ex b/test/support/case.ex
new file mode 100644
index 00000000..d65e8843
--- /dev/null
+++ b/test/support/case.ex
@@ -0,0 +1,7 @@
+defmodule Ecto.Integration.Case do
+  use ExUnit.CaseTemplate
+
+  setup do
+    :ok = Ecto.Adapters.SQL.Sandbox.checkout(Ecto.Integration.TestRepo)
+  end
+end
diff --git a/test/support/migration.ex b/test/support/migration.ex
new file mode 100644
index 00000000..8882fa34
--- /dev/null
+++ b/test/support/migration.ex
@@ -0,0 +1,123 @@
+defmodule Ecto.Integration.Migration do
+  use Ecto.Migration
+
+  def change do
+    # IO.puts "TESTING MIGRATION LOCK"
+    # Process.sleep(10000)
+
+    create table(:users, comment: "users table") do
+      add(:name, :string, comment: "name column")
+      add(:custom_id, :uuid)
+      timestamps()
+    end
+
+    create table(:posts) do
+      add(:title, :string, size: 100)
+      add(:counter, :integer)
+      add(:blob, :binary)
+      add(:bid, :binary_id)
+      add(:uuid, :uuid)
+      add(:meta, :map)
+      add(:links, {:map, :string})
+      add(:intensities, {:map, :float})
+      add(:public, :boolean)
+      add(:cost, :decimal, precision: 2, scale: 1)
+      add(:visits, :integer)
+      add(:wrapped_visits, :integer)
+      add(:intensity, :float)
+      add(:author_id, :integer)
+      add(:posted, :date)
+      timestamps(null: true)
+    end
+
+    create table(:posts_users, primary_key: false) do
+      add(:post_id, references(:posts))
+      add(:user_id, references(:users))
+    end
+
+    create table(:posts_users_pk) do
+      add(:post_id, references(:posts))
+      add(:user_id, references(:users))
+      timestamps()
+    end
+
+    # Add a unique index on uuid. We use this to verify that the unique
+    # constraint is only enforced when the uuid column is not NULL.
+    create(unique_index(:posts, [:uuid], comment: "posts index"))
+
+    create table(:permalinks) do
+      add(:uniform_resource_locator, :string)
+      add(:title, :string)
+      add(:post_id, references(:posts))
+      add(:user_id, references(:users))
+    end
+
+    create(unique_index(:permalinks, [:post_id]))
+    create(unique_index(:permalinks, [:uniform_resource_locator]))
+
+    create table(:comments) do
+      add(:text, :string, size: 100)
+      add(:lock_version, :integer, default: 1)
+      add(:post_id, references(:posts))
+      add(:author_id, references(:users))
+    end
+
+    create table(:customs, primary_key: false) do
+      add(:bid, :binary_id, primary_key: true)
+      add(:uuid, :uuid)
+    end
+
+    create(unique_index(:customs, [:uuid]))
+
+    create table(:customs_customs, primary_key: false) do
+      add(:custom_id1, references(:customs, column: :bid, type: :binary_id))
+      add(:custom_id2, references(:customs, column: :bid, type: :binary_id))
+    end
+
+    create table(:barebones) do
+      add(:num, :integer)
+    end
+
+    create table(:transactions) do
+      add(:num, :integer)
+    end
+
+    create table(:lock_counters) do
+      add(:count, :integer)
+    end
+
+    create table(:orders) do
+      add(:item, :map)
+      add(:items, :map)
+      add(:meta, :map)
+      add(:permalink_id, references(:permalinks))
+    end
+
+    unless :array_type in ExUnit.configuration()[:exclude] do
+      create table(:tags) do
+        add(:ints, {:array, :integer})
+        add(:uuids, {:array, :uuid}, default: [])
+        add(:items, {:array, :map})
+      end
+    end
+
+    create table(:composite_pk, primary_key: false) do
+      add(:a, :integer, primary_key: true)
+      add(:b, :integer, primary_key: true)
+      add(:name, :string)
+    end
+
+    create table(:corrupted_pk, primary_key: false) do
+      add(:a, :string)
+    end
+
+    create table(:posts_users_composite_pk) do
+      add(:post_id, references(:posts), primary_key: true)
+      add(:user_id, references(:users), primary_key: true)
+      timestamps()
+    end
+
+    create(unique_index(:posts_users_composite_pk, [:post_id, :user_id]))
+  end
+end
diff --git a/test/support/repo.ex b/test/support/repo.ex
new file mode 100644
index 00000000..dc3a478e
--- /dev/null
+++ b/test/support/repo.ex
@@ -0,0 +1,43 @@
+defmodule Ecto.Integration.Repo do
+  defmacro __using__(opts) do
+    quote do
+      use Ecto.Repo, unquote(opts)
+
+      @query_event __MODULE__
+                   |> Module.split()
+                   |> Enum.map(&(&1 |> Macro.underscore() |> String.to_atom()))
+                   |> Kernel.++([:query])
+
+      def init(_, opts) do
+        fun = &Ecto.Integration.Repo.handle_event/4
+        :telemetry.attach_many(__MODULE__, [[:custom], @query_event], fun, :ok)
+        {:ok, opts}
+      end
+    end
+  end
+
+  def handle_event(event, latency, metadata, _config) do
+    handler = Process.delete(:telemetry) || fn _, _, _ -> :ok end
+    handler.(event, latency, metadata)
+  end
+end
+
+defmodule Ecto.Integration.TestRepo do
+  use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Exqlite
+
+  def create_prefix(prefix) do
+    "create database #{prefix}"
+  end
+
+  def drop_prefix(prefix) do
+    "drop database #{prefix}"
+  end
+
+  def uuid do
+    Ecto.UUID
+  end
+end
+
+defmodule Ecto.Integration.PoolRepo do
+  use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Exqlite
+end
diff --git a/test/test_helper.exs b/test/test_helper.exs
index 869559e7..6b254019 100644
--- a/test/test_helper.exs
+++ b/test/test_helper.exs
@@ -1 +1,39 @@
+Logger.configure(level: :info)
+
+Application.put_env(:ecto, :primary_key_type, :id)
+Application.put_env(:ecto, :async_integration_tests, false)
+
+ecto = Mix.Project.deps_paths()[:ecto]
+Code.require_file("#{ecto}/integration_test/support/schemas.exs", __DIR__)
+
+Application.put_env(:ecto_sql, Ecto.Integration.TestRepo,
+  database: "/tmp/exqlite_sandbox_test.db",
+  pool: Ecto.Adapters.SQL.Sandbox,
+  show_sensitive_data_on_connection_error: true
+)
+
+Application.put_env(:ecto_sql, Ecto.Integration.PoolRepo,
+  adapter: Ecto.Adapters.Exqlite,
+  database: "/tmp/exqlite_pool_test.db",
+  pool_size: 10,
+  show_sensitive_data_on_connection_error: true
+)
+
+{:ok, _} =
+  Ecto.Adapters.Exqlite.ensure_all_started(
+    Ecto.Integration.TestRepo.config(),
+    :temporary
+  )
+
+# Recreate the database files and start the repositories
+_ = Ecto.Adapters.Exqlite.storage_down(Ecto.Integration.TestRepo.config())
+:ok = Ecto.Adapters.Exqlite.storage_up(Ecto.Integration.TestRepo.config())
+
+{:ok, _pid} = Ecto.Integration.TestRepo.start_link()
+{:ok, _pid} = Ecto.Integration.PoolRepo.start_link()
+
+# :ok = Ecto.Migrator.up(Ecto.Integration.TestRepo, 0, Ecto.Integration.Migration, log: false)
+Ecto.Adapters.SQL.Sandbox.mode(Ecto.Integration.TestRepo, :manual)
+Process.flag(:trap_exit, true)
+
 ExUnit.start()
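+
+# Usage sketch (illustrative only, not executed by the suite): handle_event/4
+# in Ecto.Integration.Repo looks up a handler in the process dictionary, so a
+# test can observe query telemetry for a single call roughly like this:
+#
+#     Process.put(:telemetry, fn event, measurements, _metadata ->
+#       send(self(), {:telemetry, event, measurements})
+#     end)
+#
+#     Ecto.Integration.TestRepo.all(Ecto.Integration.Post)
+#     assert_received {:telemetry, [:ecto, :integration, :test_repo, :query], _}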