code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
defmodule ExBin.Formatter do @doc """ Pretty print a given `binary`. By default this will output in the same format as `hexdump -C <file>` from BSD general commands. This function returns a string, so you aren't bound to any certain I/O mechanism (i.e. you may not want to print this to stdout, so `IO.puts/1` is not used). The `opts` keyword list argument can contain the following options: - `:canonical` - either `true` or `false`; whether output should be in canonical format or not (`-C` flag to `hexdump` BSD command). Defaults to `true`. - `:start` - an integer value to start indexing at when outputting; defaults to zero. ## Examples iex> ExBin.Formatter.format_bytes(<<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20>>, canonical: false) "0000000 0001 0203 0405 0607 0809 0a0b 0c0d 0e0f\\n0000010 1011 1213 14" iex> ExBin.Formatter.format_bytes(<<0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20>>, start: 0xa0, canonical: false) "00000a0 0001 0203 0405 0607 0809 0a0b 0c0d 0e0f\\n00000b0 1011 1213 14" ### Canonical format The `:canonical` option, which is the default, will produce an output as if you were running ``` hexdump -C <file> ``` iex> ExBin.Formatter.format_bytes("Foo:bar\\nfizz\\tbang" <> <<0xf9, 0xd4>>) "00000000 46 6f 6f 3a 62 61 72 0a 66 69 7a 7a 09 62 61 6e |Foo:bar.fizz.ban|\\n00000010 67 f9 d4 |g..|" """ def format_bytes(binary, opts \\ []) when is_binary(binary) do start = case Keyword.fetch(opts, :start) do {:ok, start} -> start :error -> 0 end formatter = case Keyword.fetch(opts, :canonical) do {:ok, false} -> &format_uncanonical/2 _ -> &format_canonical/2 end binary |> Base.encode16(case: :lower) |> formatter.(start) end defp format_canonical(hex_string, start) do hex_string |> String.graphemes() |> Enum.chunk_every(2) |> Enum.map(&Enum.join/1) |> Enum.chunk_every(16) |> Enum.with_index(start) |> Enum.map(fn {bytes, index} -> index_str = String.pad_leading(Base.encode16(<<index * 16 + start>>, case: :lower), 8, "0") first_eight = Enum.slice(bytes, 0..7) last_eight = Enum.slice(bytes, 8..15) printable = bytes |> Enum.map(fn hex_byte -> {:ok, byte} = Base.decode16(hex_byte, case: :lower) if String.printable?(byte) do printable_str = String.trim(inspect(byte), "\"") if String.length(printable_str) == 1 do printable_str else "." end else "." end end) |> Enum.join() res_str = index_str <> " " <> Enum.join(first_eight, " ") <> " " <> Enum.join(last_eight, " ") String.pad_trailing(res_str, 58) <> " |#{printable}|" end) |> Enum.join("\n") end defp format_uncanonical(hex_string, start) do hex_string |> String.graphemes() |> Enum.chunk_every(4) |> Enum.map(&Enum.join/1) |> Enum.chunk_every(8) |> Enum.with_index(start) |> Enum.map(fn {bytes, index} -> index_str = String.pad_leading(Base.encode16(<<index * 16 + start>>, case: :lower), 7, "0") index_str <> " " <> Enum.join(bytes, " ") end) |> Enum.join("\n") end @doc """ Formats a bitstring into a string of ones and zeroes. `opts` allows you to optionally set some style settings. Currently, only one is supported: - `:spacer` - if `true`, this will insert a space between each set of 8 bits; defaults to `false`. ## Examples iex> ExBin.Formatter.format_bits(<<0b1011011100010101::16>>) "1011011100010101" iex> ExBin.Formatter.format_bits(<<0b1011011100010101::16>>, spacer: true) "10110111 00010101" """ def format_bits(bitstr, opts \\ []) when is_bitstring(bitstr) do add_spacer = case Keyword.fetch(opts, :spacer) do {:ok, true} -> true _ -> false end bits = ExBin.bit_stream(bitstr) if add_spacer do bits |> Enum.chunk_every(8) |> Enum.map(&Enum.join/1) |> Enum.join(" ") else bits |> Enum.join() end end end
lib/ex_bin/formatter.ex
0.805211
0.720368
formatter.ex
starcoder
defmodule Wallaby do @moduledoc """ A concurrent feature testing library. ## Configuration Wallaby supports the following options: * `:pool_size` - Maximum amount of phantoms to run. The default is `:erlang.system_info(:schedulers_online) * 2`. * `:screenshot_dir` - The directory to store screenshots. * `:screenshot_on_failure` - if Wallaby should take screenshots on test failures (defaults to `false`). * `:max_wait_time` - The amount of time that Wallaby should wait to find an element on the page. (defaults to `3_000`) * `:js_errors` - if Wallaby should re-throw javascript errors in elixir (defaults to true). * `:js_logger` - IO device where javascript console logs are written to. Defaults to :stdio. This option can also be set to a file or any other io device. You can disable javascript console logging by setting this to `nil`. * `:phantomjs` - The path to the phantomjs executable (defaults to "phantomjs") * `:phantomjs_args` - Any extra arguments that should be passed to phantomjs (defaults to "") """ use Application alias Wallaby.Session alias Wallaby.SessionStore @doc false def start(_type, _args) do import Supervisor.Spec, warn: false case driver().validate() do :ok -> :ok {:error, exception} -> raise exception end children = [ supervisor(Wallaby.Driver.ProcessWorkspace.ServerSupervisor, []), supervisor(driver(), [[name: Wallaby.Driver.Supervisor]]), :hackney_pool.child_spec(:wallaby_pool, timeout: 15_000, max_connections: 4), worker(Wallaby.SessionStore, []) ] opts = [strategy: :one_for_one, name: Wallaby.Supervisor] Supervisor.start_link(children, opts) end @type reason :: any @type start_session_opts :: {atom, any} @doc """ Starts a browser session. ## Multiple sessions Each session runs in its own browser so that each test runs in isolation. Because of this isolation multiple sessions can be created for a test: ``` @message_field Query.text_field("Share Message") @share_button Query.button("Share") @message_list Query.css(".messages") test "That multiple sessions work" do {:ok, user1} = Wallaby.start_session user1 |> visit("/page.html") |> fill_in(@message_field, with: "Hello there!") |> click(@share_button) {:ok, user2} = Wallaby.start_session user2 |> visit("/page.html") |> fill_in(@message_field, with: "Hello yourself") |> click(@share_button) assert user1 |> find(@message_list) |> List.last |> text == "Hello yourself" assert user2 |> find(@message_list) |> List.first |> text == "Hello there" end ``` """ @spec start_session([start_session_opts]) :: {:ok, Session.t()} | {:error, reason} def start_session(opts \\ []) do with {:ok, session} <- driver().start_session(opts), :ok <- SessionStore.monitor(session), do: {:ok, session} end @doc """ Ends a browser session. """ @spec end_session(Session.t()) :: :ok | {:error, reason} def end_session(%Session{driver: driver} = session) do with :ok <- SessionStore.demonitor(session), :ok <- driver.end_session(session), do: :ok end @doc false def screenshot_on_failure? do Application.get_env(:wallaby, :screenshot_on_failure) end @doc false def js_errors? do Application.get_env(:wallaby, :js_errors, true) end @doc false def js_logger do Application.get_env(:wallaby, :js_logger, :stdio) end def driver do case System.get_env("WALLABY_DRIVER") do "chrome" -> Wallaby.Experimental.Chrome "selenium" -> Wallaby.Experimental.Selenium "phantom" -> Wallaby.Phantom _ -> Application.get_env(:wallaby, :driver, Wallaby.Phantom) end end end
lib/wallaby.ex
0.819352
0.695235
wallaby.ex
starcoder
defmodule Tds do @moduledoc """ Microsoft SQL Server driver for Elixir. Tds is partial implementation of the Micorosoft SQL Server [MS-TDS](https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-tds) Tabular Data Stream Protocol. A Tds query is performed in separate server-side prepare and execute stages. At the moment query handle is not reused, but there is plan to cahce handles in near feature. It uses [RPC](https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-tds/619c43b6-9495-4a58-9e49-a4950db245b3) requests by default to `Sp_Prepare` (ProcId=11) and `Sp_Execute` (ProcId=12) query, but it is possible to configure driver to use only Sp_ExecuteSql. Please consult with [configuration](readme.html#configuration) how to do this. """ alias Tds.Query @timeout 5000 @execution_mode :prepare_execute @type start_option :: {:hostname, String.t()} | {:port, :inet.port_number()} | {:database, String.t()} | {:username, String.t()} | {:password, String.t()} | {:timeout, timeout} | {:connect_timeout, timeout} | DBConnection.start_option() @type isolation_level :: :read_uncommitted | :read_committed | :repeatable_read | :serializable | :snapshot | :no_change @type conn :: DBConnection.conn() @type resultset :: list(Tds.Result.t()) @type option :: DBConnection.option() @type transaction_option :: {:mode, :transaction | :savepoint} | {:isolation_level, isolation_level()} | option() @type execute_option :: {:decode_mapper, (list -> term)} | {:resultset, boolean()} | option @spec start_link([start_option]) :: {:ok, conn} | {:error, Tds.Error.t() | term} def start_link(opts \\ []) do DBConnection.start_link(Tds.Protocol, default(opts)) end @spec query(conn, iodata, list, [execute_option]) :: {:ok, Tds.Result.t()} | {:error, Exception.t()} def query(conn, statement, params, opts \\ []) do query = %Query{statement: statement} opts = Keyword.put_new(opts, :parameters, params) case DBConnection.prepare_execute(conn, query, params, opts) do {:ok, _query, result} -> {:ok, result} {:error, err} -> {:error, err} end end @spec query!(conn, iodata, list, [execute_option]) :: Tds.Result.t() | no_return() def query!(conn, statement, params, opts \\ []) do query = %Query{statement: statement} opts = Keyword.put_new(opts, :parameters, params) case DBConnection.prepare_execute(conn, query, params, opts) do {:ok, _query, result} -> result {:error, %{mssql: %{msg_text: msg}}} -> raise Tds.Error, msg {:error, err} -> raise err end end @doc """ Executes statement that can contain multiple sql batches, result will contain all results that server yield for each batch. """ @spec query_multi(conn(), iodata(), option(), [execute_option]) :: {:ok, resultset()} | {:error, Exception.t()} def query_multi(conn, statemnt, params, opts \\ []) do query = %Query{statement: statemnt} opts = opts |> Keyword.put_new(:parameters, params) |> Keyword.put_new(:resultset, true) case DBConnection.prepare_execute(conn, query, params, opts) do {:ok, _query, resultset} -> {:ok, resultset} {:error, err} -> {:error, err} end end @spec prepare(conn, iodata, [option]) :: {:ok, Tds.Query.t()} | {:error, Exception.t()} def prepare(conn, statement, opts \\ []) do query = %Query{statement: statement} case DBConnection.prepare(conn, query, opts) do {:ok, query} -> {:ok, query} {:error, err} -> {:error, err} end end @spec prepare!(conn, iodata, [option]) :: Tds.Query.t() | no_return() def prepare!(conn, statement, opts \\ []) do query = %Query{statement: statement} case DBConnection.prepare(conn, query, opts) do {:ok, query} -> query {:error, %{mssql: %{msg_text: msg}}} -> raise Tds.Error, msg {:error, err} -> raise err end end def proc(pid, statement, params, opts \\ []) do opts = Keyword.put_new(opts, :proc, statement) query(pid, statement, params, opts) end @spec execute(conn, Tds.Query.t(), list, [execute_option]) :: {:ok, Tds.Query.t(), Tds.Result.t()} | {:error, Tds.Error.t()} def execute(conn, query, params, opts \\ []) do case DBConnection.execute(conn, query, params, opts) do {:ok, q, result} -> {:ok, q, result} {:error, err} -> {:error, err} end end @spec execute!(conn, Tds.Query.t(), list, [execute_option]) :: Tds.Result.t() def execute!(conn, query, params, opts \\ []) do case DBConnection.execute(conn, query, params, opts) do {:ok, _q, result} -> result {:error, %{mssql: %{msg_text: msg}}} -> raise Tds.Error, msg {:error, err} -> raise err end end @spec close(conn, Tds.Query.t(), [option]) :: :ok | {:error, Exception.t()} def close(conn, query, opts \\ []) do case DBConnection.close(conn, query, opts) do {:ok, result} -> {:ok, result} {:error, err} -> {:error, err} end end @spec close!(conn, Tds.Query.t(), [option]) :: :ok def close!(conn, query, opts \\ []) do case DBConnection.close(conn, query, opts) do {:ok, result} -> result {:error, %{mssql: %{msg_text: msg}}} -> raise Tds.Error, msg {:error, err} -> raise err end end @spec transaction(conn, (DBConnection.t() -> result), [transaction_option()]) :: {:ok, result} | {:error, any} when result: var def transaction(conn, fun, opts \\ []) do DBConnection.transaction(conn, fun, opts) end @spec rollback(DBConnection.t(), reason :: any) :: no_return defdelegate rollback(conn, any), to: DBConnection @spec child_spec([start_option]) :: Supervisor.Spec.spec() def child_spec(opts) do DBConnection.child_spec(Tds.Protocol, default(opts)) end defp default(opts) do opts |> Keyword.put_new(:idle_timeout, @timeout) |> Keyword.put_new(:execution_mode, @execution_mode) end @doc """ Returns the configured JSON library. To customize the JSON library, include the following in your `config/config.exs`: config :tds, json_library: SomeJSONModule Defaults to `Jason`. """ @spec json_library() :: module() def json_library() do Application.fetch_env!(:tds, :json_library) end @doc """ Generates a version 4 (random) UUID in the MS uniqueidentifier binary format. """ @spec generate_uuid :: <<_::128>> def generate_uuid(), do: Tds.Types.UUID.bingenerate() @doc """ Decodes MS uniqueidentifier binary to its string representation """ def decode_uuid(uuid), do: Tds.Types.UUID.load(uuid) @doc """ Same as `decode_uuid/1` but raises `ArgumentError` if value is invalid """ def decode_uuid!(uuid) do case Tds.Types.UUID.load(uuid) do {:ok, value} -> value :error -> raise ArgumentError, "Invalid uuid binary #{inspect(uuid)}" end end @doc """ Encodes uuid string into MS uniqueidentifier binary """ @spec encode_uuid(any) :: :error | {:ok, <<_::128>>} def encode_uuid(value), do: Tds.Types.UUID.dump(value) @doc """ Same as `encode_uuid/1` but raises `ArgumentError` if value is invalid """ @spec encode_uuid!(any) :: <<_::128>> def encode_uuid!(value), do: Tds.Types.UUID.dump!(value) end
lib/tds.ex
0.839109
0.40248
tds.ex
starcoder
defmodule Phoenix.Ecto.SQL.Sandbox do @moduledoc """ A plug to allow concurrent, transactional acceptance tests with [`Ecto.Adapters.SQL.Sandbox`] (https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.Sandbox.html). ## Example This plug should only be used during tests. First, set a flag to enable it in `config/test.exs`: config :your_app, sql_sandbox: true And use the flag to conditionally add the plug to `lib/your_app/endpoint.ex`: if Application.compile_env(:your_app, :sql_sandbox) do plug Phoenix.Ecto.SQL.Sandbox end It's important that this is at the top of `endpoint.ex`, before any other plugs. Then, within an acceptance test, checkout a sandboxed connection as before. Use `metadata_for/2` helper to get the session metadata to that will allow access to the test's connection. Here's an example using [Hound](https://hex.pm/packages/hound): use Hound.Helpers setup do :ok = Ecto.Adapters.SQL.Sandbox.checkout(YourApp.Repo) metadata = Phoenix.Ecto.SQL.Sandbox.metadata_for(YourApp.Repo, self()) Hound.start_session(metadata: metadata) :ok end ## Supporting socket connections To support socket connections the spawned processes need access to the header used for transporting the metadata. By default this is the user agent header, but you can also use custom `X-`-headers. socket "/path", Socket, websocket: [connect_info: [:user_agent, …]] socket "/path", Socket, websocket: [connect_info: [:x_headers, …]] To fetch the value you either use `connect_info[:user_agent]` or for a custom header: Enum.find_value(connect_info.x_headers, fn {"x-my-custom-header", val} -> val _ -> false end) ### Channels For channels, `:connect_info` data is available to any of your Sockets' `c:Phoenix.Socket.connect/3` callbacks: # user_socket.ex def connect(_params, socket, connect_info) do {:ok, assign(socket, :phoenix_ecto_sandbox, connect_info[:user_agent])} end This stores the value on the socket, so it can be available to all of your channels for allowing the sandbox. # room_channel.ex def join("room:lobby", _payload, socket) do allow_ecto_sandbox(socket) {:ok, socket} end # This is a great function to extract to a helper module defp allow_ecto_sandbox(socket) do Phoenix.Ecto.SQL.Sandbox.allow( socket.assigns.phoenix_ecto_sandbox, Ecto.Adapters.SQL.Sandbox ) end `allow/2` needs to be manually called once for each channel, at best directly at the start of `c:Phoenix.Channel.join/3`. ### LiveView LiveViews can be supported in a similar fashion than channels, but using the `c:Phoenix.LiveView.mount/3` callback. def mount(_, _, socket) do allow_ecto_sandbox(socket) … end # This is a great function to extract to a helper module defp allow_ecto_sandbox(socket) do %{assigns: %{phoenix_ecto_sandbox: metadata}} = assign_new(socket, :phoenix_ecto_sandbox, fn -> if connected?(socket), do: get_connect_info(socket)[:user_agent] end) Phoenix.Ecto.SQL.Sandbox.allow(metadata, Ecto.Adapters.SQL.Sandbox) end This is a bit more complex than the channel code, because LiveViews not only are their own processes when spawned via a socket connection, but also when doing the static render as part of the plug pipeline. Given `get_connect_info/1` is only available for socket connections, this uses the `:phoenix_ecto_sandbox` assign of the rendering `conn` for the static render. ## Concurrent end-to-end tests with external clients Concurrent and transactional tests for external HTTP clients is supported, allowing for complete end-to-end tests. This is useful for cases such as JavaScript test suites for single page applications that exercise the Phoenix endpoint for end-to-end test setup and teardown. To enable this, you can expose a sandbox route on the `Phoenix.Ecto.SQL.Sandbox` plug by providing the `:at`, and `:repo` options. For example: plug Phoenix.Ecto.SQL.Sandbox, at: "/sandbox", repo: MyApp.Repo, timeout: 15_000 # the default This would expose a route at `"/sandbox"` for the given repo where external clients send POST requests to spawn a new sandbox session, and DELETE requests to stop an active sandbox session. By default, the external client is expected to pass up the `"user-agent"` header containing serialized sandbox metadata returned from the POST request, but this value may customized with the `:header` option. """ import Plug.Conn alias Plug.Conn alias Phoenix.Ecto.SQL.{SandboxSession, SandboxSupervisor} @doc """ Spawns a sandbox session to checkout a connection for a remote client. ## Examples iex> {:ok, _owner_pid, metadata} = start_child(MyApp.Repo) """ def start_child(repo, opts \\ []) do child_spec = {SandboxSession, {repo, self(), opts}} case DynamicSupervisor.start_child(SandboxSupervisor, child_spec) do {:ok, owner} -> metadata = metadata_for(repo, owner) {:ok, owner, metadata} {:error, reason} -> {:error, reason} end end @doc """ Stops a sandbox session holding a connection for a remote client. ## Examples iex> {:ok, owner_pid, metadata} = start_child(MyApp.Repo) iex> :ok = stop(owner_pid) """ def stop(owner) when is_pid(owner) do GenServer.call(owner, :checkin) end @doc false def init(opts \\ []) do session_opts = Keyword.take(opts, [:sandbox, :timeout]) %{ header: Keyword.get(opts, :header, "user-agent"), path: get_path_info(opts[:at]), repo: opts[:repo], sandbox: session_opts[:sandbox] || Ecto.Adapters.SQL.Sandbox, session_opts: session_opts } end defp get_path_info(nil), do: nil defp get_path_info(path), do: Plug.Router.Utils.split(path) @doc false def call(%Conn{method: "POST", path_info: path} = conn, %{path: path} = opts) do %{repo: repo, session_opts: session_opts} = opts {:ok, _owner, metadata} = start_child(repo, session_opts) conn |> put_resp_content_type("text/plain") |> send_resp(200, encode_metadata(metadata)) |> halt() end def call(%Conn{method: "DELETE", path_info: path} = conn, %{path: path} = opts) do case decode_metadata(extract_header(conn, opts.header)) do %{owner: owner} -> :ok = stop(owner) conn |> put_resp_content_type("text/plain") |> send_resp(200, "") |> halt() %{} -> conn |> send_resp(410, "") |> halt() end end def call(conn, %{header: header, sandbox: sandbox}) do header = extract_header(conn, header) allow(header, sandbox) assign(conn, :phoenix_ecto_sandbox, header) end defp extract_header(%Conn{} = conn, header) do conn |> get_req_header(header) |> List.first() end @doc """ Returns metadata to establish a sandbox for. The metadata is then passed via user-agent/headers to browsers. Upon request, the `Phoenix.Ecto.SQL.Sandbox` plug will decode the header and allow the request process under the sandbox. ## Options * `:trap_exit` - if the browser being used for integration testing navigates away from a page or aborts a AJAX request while the request process is talking to the database, it will corrupt the database connection and make the test fail. Therefore, to avoid intermitent tests, we recommend trapping exits in the request process, so all database connections shut down cleanly. You can disable this behaviour by setting the option to false. """ @spec metadata_for(Ecto.Repo.t() | [Ecto.Repo.t()], pid, keyword) :: map def metadata_for(repo_or_repos, pid, opts \\ []) when is_pid(pid) do %{repo: repo_or_repos, owner: pid, trap_exit: Keyword.get(opts, :trap_exit, true)} end @doc """ Encodes metadata generated by `metadata_for/2` for client response. """ def encode_metadata(metadata) do encoded = {:v1, metadata} |> :erlang.term_to_binary() |> Base.url_encode64() "BeamMetadata (#{encoded})" end @doc """ Decodes encoded metadata back into map generated from `metadata_for/2`. """ def decode_metadata(encoded_meta) when is_binary(encoded_meta) do case encoded_meta |> String.split("/") |> List.last() do "BeamMetadata (" <> metadata -> metadata |> binary_part(0, byte_size(metadata) - 1) |> parse_metadata() _ -> %{} end end def decode_metadata(_), do: %{} defp parse_metadata(encoded_metadata) do encoded_metadata |> Base.url_decode64!() |> :erlang.binary_to_term() |> case do {:v1, metadata} -> metadata _ -> %{} end end @doc """ Decodes the given metadata and allows the current process under the given sandbox. """ def allow(encoded_metadata, sandbox) when is_binary(encoded_metadata) do metadata = decode_metadata(encoded_metadata) with %{trap_exit: true} <- metadata do Process.flag(:trap_exit, true) end allow(metadata, sandbox) end def allow(%{repo: repo, owner: owner}, sandbox), do: Enum.each(List.wrap(repo), &sandbox.allow(&1, owner, self())) def allow(%{}, _sandbox), do: :ok def allow(nil, _sandbox), do: :ok end
deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox.ex
0.914295
0.531574
sandbox.ex
starcoder
defmodule Zig.Parser.Nif do @moduledoc """ This datastructure represents structured information about a single nif inside of a `Zig.sigil_Z/2` block. This is used to generate the `exported_nifs` variable which is an array of `ErlNifFunc` structs. The following keys are implemented: - name: (`t:atom/0`) the function name to be bound into the module - arity: (`t:arity/0`) the arity of the erlang function (the zig function may have a different arity). - doc: (`t:iodata/0`) zig docstrings which should be turned into elixir docs - args: (`t:String.t/0`) a list of zig types which are the arguments for the function - retval: (`t:String.t/0`) the type of the return value - opts: (`t:keyword`) list of nif options. These options are currently supported: - `concurrency: <model>` picks a [long-running nif](http://erlang.org/doc/man/erl_nif.html#lengthy_work) concurrency model. The following concurrency models are supported: - :threaded -- if the nif should run in a separate OS thread. - :yielding -- if the nif should use zig's `yield` keyword to yield to the BEAM scheduler. - :dirty_cpu -- if the nif should run in a dirty cpu scheduler. - :dirty_io -- if the nif should run in a dirty io scheduler. """ alias Zig.Parser.Resource @float_types ~w(f16 f32 f64) @int_types ~w(u16 i32 u32 i64 u64 c_int c_uint c_long c_ulong isize usize) @bool ["bool"] @char ["u8"] @beam_args ~w(beam.term beam.atom beam.pid) @enif_args ~w(e.ErlNifTerm e.ErlNifPid) @scalar_types @float_types ++ @int_types ++ @bool ++ @char ++ @beam_args ++ @enif_args @void ["void"] @env ~w(?*e.ErlNifEnv beam.env) @array_types Enum.flat_map(@scalar_types, &["[]#{&1}", "[*c]#{&1}", "[_]#{&1}"]) @valid_args @scalar_types ++ @array_types ++ @env @valid_retvals @scalar_types ++ @array_types ++ @void @enforce_keys [:name, :arity] defstruct @enforce_keys ++ [ doc: nil, args: [], retval: nil, opts: [], test: nil # only to be used for tests. This is the string name # of the test which is going to be bound in. ] @type concurrency :: :threaded | :yielding | :dirty_io | :dirty_cpu @type option :: {:concurrency, concurrency} @type t :: %__MODULE__{ name: atom, arity: arity, doc: iodata | nil, args: [String.t], retval: String.t, opts: [option], test: atom } @beam_envs ["beam.env", "?*e.ErlNifEnv"] # validate_arity/3: checks to make sure the arity of nif declaration matches the function @spec validate_arity([String.t], Parser.t, non_neg_integer) :: :ok | no_return def validate_arity([env | rest], context, line) when env in @beam_envs do validate_arity(rest, context, line) end def validate_arity(rest, context = %{local: %{arity: arity}}, line) when length(rest) != arity do raise SyntaxError, file: context.file, line: line + context.zig_block_line - 1, description: "nif declaration arity (#{arity}) doesn't match the expected function arity #{length(rest)}" end def validate_arity(_, _, _), do: :ok # validate_args/3 : raises if an invalid argument type is sent to to the function @spec validate_args([String.t], Parser.t, non_neg_integer) :: :ok | no_return def validate_args([], _context, _line), do: :ok def validate_args([args | rest], context, line) when args in @valid_args do validate_args(rest, context, line) end def validate_args([invalid_type | _], context, line) do raise SyntaxError, file: context.file, line: line + context.zig_block_line, description: "nif function #{context.local.name} demands an invalid argument type #{invalid_type}" end def validate_args(_, _, _), do: :ok @spec validate_retval([String.t], Parser.t, non_neg_integer) :: :ok | no_return def validate_retval([retval | _], _context, _line) when retval in @valid_retvals, do: :ok def validate_retval([retval | _], context, line) do raise SyntaxError, file: context.file, line: line + context.zig_block_line, description: "nif function #{context.local.name} returns an invalid type #{retval}" end def register_function_header([retval | args], context) do alias Zig.Nif.{Threaded, Yielding} final_nif = %{context.local | retval: retval, args: Enum.reverse(args)} # additional resources that the nif requires to perform correctly. These are # usually references dropped by called nif for a safe callback. resource = case context.local.opts[:concurrency] do # threaded nifs require a resource containing a reference to the thread # callback. :threaded -> [%Resource{ name: Threaded.cache_ptr(context.local.name), cleanup: Threaded.cache_cleanup(context.local.name) }] :yielding -> [%Resource{ name: Yielding.frame_ptr(context.local.name), cleanup: Yielding.frame_cleanup(context.local.name), }] _ -> [] end %{context | global: resource ++ [final_nif | context.global]} end end
lib/zig/parser/nif.ex
0.773216
0.560132
nif.ex
starcoder
defmodule StrongMigrations.Parser do @moduledoc """ The module responsible for reading an ASTand generating a bucket of the information of migration's content """ alias StrongMigrations.Migration @doc """ Parses given migrations into a struct with information about the content """ @spec parse([StrongMigrations.migration_file()]) :: :ok def parse(migration_files) do migration_files |> Stream.map(&file_path_to_ast/1) |> Stream.map(&analyze_code/1) |> Enum.to_list() end defp file_path_to_ast(file_path) do {file_path, file_path |> File.read!() |> Code.string_to_quoted!()} end defp analyze_code({file_path, {:defmodule, _, [_, [do: {:__block__, _, body}]]}}) do parse_body(body, Migration.new(file_path)) end defp analyze_code({file_path, _}), do: Migration.new(file_path) defp parse_body([{:use, _, _} | tail], acc), do: parse_body(tail, acc) defp parse_body([{:@, _, [{:disable_ddl_transaction, _, [true]}]} | tail], acc) do parse_body(tail, %{acc | disable_ddl_transaction: true}) end defp parse_body([{:@, _, [{:disable_migration_lock, _, [true]}]} | tail], acc) do parse_body(tail, %{acc | disable_migration_lock: true}) end defp parse_body([{:def, _, [{_, _, _}, [do: {:safety_assured, _, _}]]} | tail], acc) do parse_body(tail, acc) end defp parse_body( [{:def, _, [_, [do: {:create, _, [{:index, _, [_, _, [concurrently: true]]}]}]]} | tail], acc ) do parse_body(tail, %{acc | create_index_concurrently: true}) end defp parse_body( [ {:def, _, [_, [do: {:create, _, [{:unique_index, _, [_, _, [concurrently: true]]}]}]]} | tail ], acc ) do parse_body(tail, %{acc | create_index_concurrently: true}) end defp parse_body( [{:def, _, [_, [do: {:create, _, [{:index, _, [_, _, opts]}]}]]} | tail], acc ) do parse_body(tail, %{acc | create_index_concurrently: Keyword.get(opts, :concurrently, false)}) end defp parse_body( [{:def, _, [_, [do: {:create, _, [{:unique_index, _, [_, _, opts]}]}]]} | tail], acc ) do parse_body(tail, %{acc | create_index_concurrently: Keyword.get(opts, :concurrently, false)}) end defp parse_body( [{:def, _, [_, [do: {:drop, _, [{:index, _, [_, _, [concurrently: true]]}]}]]} | tail], acc ) do parse_body(tail, %{acc | drop_index_concurrently: true}) end defp parse_body([{:def, _, [_, [do: {:create, _, [{:index, _, _}]}]]} | tail], acc) do parse_body(tail, %{acc | create_index: true}) end defp parse_body([{:def, _, [_, [do: {:create, _, [{:unique_index, _, _}]}]]} | tail], acc) do parse_body(tail, %{acc | create_index: true}) end defp parse_body([{:def, _, [_, [do: {:drop, _, [{:index, _, _}]}]]} | tail], acc) do parse_body(tail, %{acc | drop_index: true}) end defp parse_body([{:def, _, [_, [do: {:drop, _, [{:table, _, _}]}]]} | tail], acc) do parse_body(tail, %{acc | drop_table: true}) end defp parse_body([{:def, _, [_, [do: {:drop_if_exists, _, [{:table, _, _}]}]]} | tail], acc) do parse_body(tail, %{acc | drop_table: true}) end defp parse_body( [ {:def, _, [_, [do: {:rename, _, [{:table, _, [_table]}, _column_name, [to: _new_column_name]]}]]} | tail ], acc ) do parse_body(tail, %{acc | rename_column: true}) end defp parse_body( [ {:def, _, [_, [do: {:alter, _, [{:table, _, _}, [do: {:__block__, _, opts}]]}]]} | tail ], acc ) do acc = parse_complex_body(opts, acc) parse_body(tail, acc) end defp parse_body([{:def, _, [_, [do: {:alter, _, opts}]]} | tail], acc) do acc = parse_complex_body(opts, acc) parse_body(tail, acc) end defp parse_body([{:def, _, [_, [do: {:__block__, _, opts}]]} | tail], acc) do acc = parse_complex_body(opts, acc) parse_body(tail, acc) end defp parse_body([_head | tail], acc) do parse_body(tail, acc) end defp parse_body([], acc), do: acc defp parse_complex_body([{:create, _, [{:index, _, [_, _, [concurrently: true]]}]} | tail], acc) do parse_complex_body(tail, %{acc | create_index_concurrently: true}) end defp parse_complex_body( [{:create, _, [{:unique_index, _, [_, _, [concurrently: true]]}]} | tail], acc ) do parse_complex_body(tail, %{acc | create_index_concurrently: true}) end defp parse_complex_body([{:create, _, [{:index, _, [_, _]}]} | tail], acc) do parse_complex_body(tail, %{acc | create_index: true}) end defp parse_complex_body([{:create, _, [{:unique_index, _, [_, _]}]} | tail], acc) do parse_complex_body(tail, %{acc | create_index: true}) end defp parse_complex_body([{:drop, _, [{:index, _, [_, _, [concurrently: true]]}]} | tail], acc) do parse_complex_body(tail, %{acc | drop_index_concurrently: true}) end defp parse_complex_body([{:drop, _, [{:index, _, [_, _]}]} | tail], acc) do parse_complex_body(tail, %{acc | drop_index: true}) end defp parse_complex_body( [{:rename, _, [{:table, _, [_table]}, _column_name, [to: _new_column_name]]} | tail], acc ) do parse_complex_body(tail, %{acc | rename_column: true}) end defp parse_complex_body([[do: {:remove, _, [_column]}] | tail], acc) do parse_complex_body(tail, %{acc | remove_column: true}) end defp parse_complex_body([[do: {:remove_if_exists, _, [_column]}] | tail], acc) do parse_complex_body(tail, %{acc | remove_column: true}) end defp parse_complex_body([{:remove, _, [_column]} | tail], acc) do parse_complex_body(tail, %{acc | remove_column: true}) end defp parse_complex_body([{:remove_if_exists, _, [_column]} | tail], acc) do parse_complex_body(tail, %{acc | remove_column: true}) end defp parse_complex_body( [{:alter, _, [{:table, _, [_table]}, [do: {:remove, _, [_column]}]]} | tail], acc ) do parse_complex_body(tail, %{acc | remove_column: true}) end defp parse_complex_body( [{:alter, _, [{:table, _, [_table]}, [do: {:remove_if_exist, _, [_column]}]]} | tail], acc ) do parse_complex_body(tail, %{acc | remove_column: true}) end defp parse_complex_body([{:drop, _, [{:table, _, _table_name}]} | tail], acc) do parse_complex_body(tail, %{acc | drop_table: true}) end defp parse_complex_body([{:drop_if_exists, _, [{:table, _, _table_name}]} | tail], acc) do parse_complex_body(tail, %{acc | drop_table: true}) end defp parse_complex_body([_head | tail], acc) do parse_complex_body(tail, acc) end defp parse_complex_body([], acc), do: acc end
lib/strong_migrations/parser.ex
0.804483
0.47792
parser.ex
starcoder
defmodule Cldr.Calendar.Coptic do @moduledoc """ Implementation of the Coptic calendar. """ import Cldr.Math, only: [mod: 2] import Cldr.Macros @behaviour Calendar @behaviour Cldr.Calendar @type year :: -9999..-1 | 1..9999 @type month :: 1..12 @type day :: 1..31 @months_in_year 13 @days_in_week 7 @doc """ Defines the CLDR calendar type for this calendar. This type is used in support of `Cldr.Calendar. localize/3`. """ @impl true def cldr_calendar_type do :coptic end @doc """ Identifies that this calendar is month based. """ @impl true def calendar_base do :month end @epoch Cldr.Calendar.Julian.date_to_iso_days(284, 8, 29) def epoch do @epoch end @doc """ Determines if the date given is valid according to this calendar. """ @impl true @months_with_30_days 1..12 def valid_date?(_year, month, day) when month in @months_with_30_days and day in 1..30 do true end def valid_date?(year, 13, 6) do if leap_year?(year), do: true, else: false end def valid_date?(_year, 13, day) when day in 1..5 do true end def valid_date?(_year, _month, _day) do false end @doc """ Calculates the year and era from the given `year`. The ISO calendar has two eras: the current era which starts in year 1 and is defined as era "1". And a second era for those years less than 1 defined as era "0". """ @spec year_of_era(year) :: {year, era :: 0..1} @impl true def year_of_era(year) when year > 0 do {year, 1} end def year_of_era(year) when year < 0 do {abs(year), 0} end @doc """ Calculates the quarter of the year from the given `year`, `month`, and `day`. It is an integer from 1 to 4. """ @spec quarter_of_year(year, month, day) :: 1..4 @impl true def quarter_of_year(_year, _month, _day) do {:error, :not_defined} end @doc """ Calculates the month of the year from the given `year`, `month`, and `day`. It is an integer from 1 to 12. """ @spec month_of_year(year, month, day) :: month @impl true def month_of_year(_year, month, _day) do month end @doc """ Calculates the week of the year from the given `year`, `month`, and `day`. It is an integer from 1 to 53. """ @spec week_of_year(year, month, day) :: {:error, :not_defined} @impl true def week_of_year(_year, _month, _day) do {:error, :not_defined} end @doc """ Calculates the ISO week of the year from the given `year`, `month`, and `day`. It is an integer from 1 to 53. """ @spec iso_week_of_year(year, month, day) :: {:error, :not_defined} @impl true def iso_week_of_year(_year, _month, _day) do {:error, :not_defined} end @doc """ Calculates the week of the year from the given `year`, `month`, and `day`. It is an integer from 1 to 53. """ @spec week_of_month(year, month, day) :: {pos_integer(), pos_integer()} | {:error, :not_defined} @impl true def week_of_month(_year, _month, _day) do {:error, :not_defined} end @doc """ Calculates the day and era from the given `year`, `month`, and `day`. """ @spec day_of_era(year, month, day) :: {day :: pos_integer(), era :: 0..1} @impl true def day_of_era(year, month, day) do {_, era} = year_of_era(year) days = date_to_iso_days(year, month, day) {days + epoch(), era} end @doc """ Calculates the day of the year from the given `year`, `month`, and `day`. """ @spec day_of_year(year, month, day) :: 1..366 @impl true def day_of_year(year, month, day) do first_day = date_to_iso_days(year, 1, 1) this_day = date_to_iso_days(year, month, day) this_day - first_day + 1 end @epoch_day_of_week 6 if Code.ensure_loaded?(Date) && function_exported?(Date, :day_of_week, 2) do @last_day_of_week 5 @spec day_of_week(year, month, day, :default | atom()) :: {Calendar.day_of_week(), first_day_of_week :: non_neg_integer(), last_day_of_week :: non_neg_integer()} @impl true def day_of_week(year, month, day, :default) do days = date_to_iso_days(year, month, day) days_after_saturday = rem(days, 7) day = Cldr.Math.amod(days_after_saturday + @epoch_day_of_week, @days_in_week) {day, @epoch_day_of_week, @last_day_of_week} end else @spec day_of_week(year, month, day) :: 1..7 @impl true def day_of_week(year, month, day) do days = date_to_iso_days(year, month, day) days_after_saturday = rem(days, 7) Cldr.Math.amod(days_after_saturday + @epoch_day_of_week, @days_in_week) end end @doc """ Returns the number of periods in a given `year`. A period corresponds to a month in month-based calendars and a week in week-based calendars.. """ @impl true def periods_in_year(_year) do @months_in_year end @doc """ Returns the number of months in a given `year`. """ @impl true def months_in_year(_year) do @months_in_year end @impl true def weeks_in_year(_year) do {:error, :not_defined} end @doc """ Returns the number days in a given year. """ @impl true def days_in_year(year) do if leap_year?(year), do: 366, else: 365 end @doc """ Returns how many days there are in the given year-month. """ @spec days_in_month(year, month) :: 29..31 @impl true def days_in_month(year, 13) do if leap_year?(year), do: 6, else: 5 end def days_in_month(_year, month) when month in @months_with_30_days do 30 end @doc """ Returns the number days in a a week. """ def days_in_week do @days_in_week end @doc """ Returns a `Date.Range.t` representing a given year. """ @impl true def year(year) do last_month = months_in_year(year) days_in_last_month = days_in_month(year, last_month) with {:ok, start_date} <- Date.new(year, 1, 1, __MODULE__), {:ok, end_date} <- Date.new(year, last_month, days_in_last_month, __MODULE__) do Date.range(start_date, end_date) end end @doc """ Returns a `Date.Range.t` representing a given quarter of a year. """ @impl true def quarter(_year, _quarter) do {:error, :not_defined} end @doc """ Returns a `Date.Range.t` representing a given month of a year. """ @impl true def month(year, month) do starting_day = 1 ending_day = days_in_month(year, month) with {:ok, start_date} <- Date.new(year, month, starting_day, __MODULE__), {:ok, end_date} <- Date.new(year, month, ending_day, __MODULE__) do Date.range(start_date, end_date) end end @doc """ Returns a `Date.Range.t` representing a given week of a year. """ @impl true def week(_year, _week) do {:error, :not_defined} end @doc """ Adds an `increment` number of `date_part`s to a `year-month-day`. `date_part` can be `:months` only. """ @impl true def plus(year, month, day, date_part, increment, options \\ []) def plus(year, month, day, :months, months, options) do months_in_year = months_in_year(year) {year_increment, new_month} = Cldr.Math.div_amod(month + months, months_in_year) new_year = year + year_increment new_day = if Keyword.get(options, :coerce, false) do max_new_day = days_in_month(new_year, new_month) min(day, max_new_day) else day end {new_year, new_month, new_day} end @doc """ Returns if the given year is a leap year. Since this calendar is observational we calculate the start of successive years and then calcualate the difference in days to determine if its a leap year. """ @spec leap_year?(year) :: boolean() @impl true def leap_year?(year) do mod(year, 4) == 3 end @doc """ Returns the number of days since the calendar epoch for a given `year-month-day` """ def date_to_iso_days(year, month, day) do epoch() - 1 + 365 * (year - 1) + :math.floor(year / 4) + 30 * (month - 1) + day |> trunc end @doc """ Returns a `{year, month, day}` calculated from the number of `iso_days`. """ def date_from_iso_days(iso_days) do year = :math.floor((4 * (iso_days - epoch()) + 1463) / 1461) month = :math.floor((iso_days - date_to_iso_days(year, 1, 1)) / 30) + 1 day = iso_days + 1 - date_to_iso_days(year, month, 1) {trunc(year), trunc(month), trunc(day)} end @doc """ Returns the `t:Calendar.iso_days/0` format of the specified date. """ @impl true @spec naive_datetime_to_iso_days( Calendar.year(), Calendar.month(), Calendar.day(), Calendar.hour(), Calendar.minute(), Calendar.second(), Calendar.microsecond() ) :: Calendar.iso_days() def naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond) do {date_to_iso_days(year, month, day), time_to_day_fraction(hour, minute, second, microsecond)} end @doc """ Converts the `t:Calendar.iso_days/0` format to the datetime format specified by this calendar. """ @spec naive_datetime_from_iso_days(Calendar.iso_days()) :: { Calendar.year(), Calendar.month(), Calendar.day(), Calendar.hour(), Calendar.minute(), Calendar.second(), Calendar.microsecond() } @impl true def naive_datetime_from_iso_days({days, day_fraction}) do {year, month, day} = date_from_iso_days(days) {hour, minute, second, microsecond} = time_from_day_fraction(day_fraction) {year, month, day, hour, minute, second, microsecond} end @doc false calendar_impl() def parse_date(string) do Cldr.Calendar.Parse.parse_date(string, __MODULE__) end @doc false calendar_impl() def parse_utc_datetime(string) do Cldr.Calendar.Parse.parse_utc_datetime(string, __MODULE__) end @doc false calendar_impl() def parse_naive_datetime(string) do Cldr.Calendar.Parse.parse_naive_datetime(string, __MODULE__) end if Version.match?(System.version(), ">= 1.10.0-dev") do @doc false @impl Calendar defdelegate parse_time(string), to: Calendar.ISO end @doc false @impl Calendar defdelegate day_rollover_relative_to_midnight_utc, to: Calendar.ISO @doc false @impl Calendar defdelegate time_from_day_fraction(day_fraction), to: Calendar.ISO @doc false @impl Calendar defdelegate time_to_day_fraction(hour, minute, second, microsecond), to: Calendar.ISO @doc false @impl Calendar defdelegate date_to_string(year, month, day), to: Calendar.ISO @doc false @impl Calendar defdelegate datetime_to_string( year, month, day, hour, minute, second, microsecond, time_zone, zone_abbr, utc_offset, std_offset ), to: Calendar.ISO @doc false @impl Calendar defdelegate naive_datetime_to_string( year, month, day, hour, minute, second, microsecond ), to: Calendar.ISO @doc false @impl Calendar defdelegate time_to_string(hour, minute, second, microsecond), to: Calendar.ISO @doc false @impl Calendar defdelegate valid_time?(hour, minute, second, microsecond), to: Calendar.ISO end
lib/cldr/calendar/coptic.ex
0.927929
0.728797
coptic.ex
starcoder
defmodule CforumWeb.Views.ViewHelpers.RelativeTime do @moduledoc """ Provides functions to convert a time period to a relative text, e.g. „a year ago“ """ import CforumWeb.Gettext @doc """ Generates a relative time text, e.g. "less than 5 seconds" or "about an hour". `time` may be a `%DateTime{}` or the number of seconds for the duration. """ def relative_time(%DateTime{} = time), do: relative_time(Timex.diff(Timex.now(), time, :seconds)) def relative_time(%NaiveDateTime{} = time), do: relative_time(Timex.diff(Timex.now(), time, :seconds)) def relative_time(seconds) do minutes = round(seconds / 60) time_as_relative_text(minutes, seconds) end defp time_as_relative_text(minutes, seconds) when minutes in 0..1 do case seconds do seconds when seconds in 0..4 -> gettext("less than 5 seconds") seconds when seconds in 5..9 -> gettext("less than 10 seconds") seconds when seconds in 10..19 -> gettext("less than 20 seconds") seconds when seconds in 20..39 -> gettext("half a minute") seconds when seconds in 40..59 -> gettext("less than a minute") _ -> gettext("about 1 minute") end end defp time_as_relative_text(minutes, _) when minutes in 2..44, do: gettext("less than %{minutes} minutes", minutes: rounded_minutes(minutes)) defp time_as_relative_text(minutes, _) when minutes in 45..89, do: gettext("about an hour") defp time_as_relative_text(minutes, _) when minutes in 90..1439, do: gettext("about %{hours} hours", hours: round(minutes / 60)) defp time_as_relative_text(minutes, _) when minutes in 1440..2519, do: gettext("a day") defp time_as_relative_text(minutes, _) when minutes in 2_520..43_199, do: gettext("%{days} days", days: round(minutes / 1440)) defp time_as_relative_text(minutes, _) when minutes in 43_200..86_399, do: gettext("about 1 month") defp time_as_relative_text(minutes, _) when minutes in 86_400..525_599, do: gettext("%{months} months", months: round(minutes / 43_200)) defp time_as_relative_text(minutes, _) when minutes in 525_600..1_051_199, do: gettext("1 year") defp time_as_relative_text(minutes, _), do: gettext("%{years} years", years: round(minutes / 525_600)) defp rounded_minutes(no) do result = trunc(Float.floor((no + 5) / 5) * 5) if result < 5, do: 5, else: result end end
lib/cforum_web/views/view_helpers/relative_time.ex
0.752468
0.466846
relative_time.ex
starcoder
defmodule GGity.Geom.Boxplot do @moduledoc false alias GGity.{Draw, Geom, Plot, Shapes} @type t() :: %__MODULE__{} @type record() :: map() @type mapping() :: map() defstruct data: nil, mapping: nil, stat: :boxplot, position: :dodge, key_glyph: :boxplot, outlier_color: nil, outlier_fill: nil, outlier_shape: :circle, outlier_size: 5, color: "black", fill: "white", alpha: 1, box_group_width: nil, custom_attributes: nil @spec new(mapping(), keyword()) :: Geom.Boxplot.t() def new(mapping, options \\ []) do struct(Geom.Boxplot, [{:mapping, mapping} | options]) end @spec draw(Geom.Boxplot.t(), list(map()), Plot.t()) :: iolist() def draw(%Geom.Boxplot{} = geom_boxplot, data, plot) do number_of_levels = length(plot.scales.x.levels) group_width = (plot.width - number_of_levels * (plot.scales.x.padding - 1)) / number_of_levels geom_boxplot = struct(geom_boxplot, box_group_width: group_width) boxplots(geom_boxplot, data, plot) end defp boxplots(%Geom.Boxplot{} = geom_boxplot, data, plot) do data |> Enum.group_by(fn row -> row[geom_boxplot.mapping[:x]] end) |> Enum.with_index() |> Enum.map(fn {{_x_value, group}, group_index} -> boxplot_group(geom_boxplot, group, group_index, plot) end) end defp boxplot_group(geom_boxplot, group_values, group_index, %Plot{scales: scales} = plot) do scale_transforms = geom_boxplot.mapping |> Map.keys() |> Enum.reduce(%{}, fn aesthetic, mapped -> Map.put(mapped, aesthetic, Map.get(scales[aesthetic], :transform)) end) transforms = geom_boxplot |> Map.take([:alpha, :color, :fill]) |> Enum.reduce(%{}, fn {aesthetic, fixed_value}, fixed -> Map.put(fixed, aesthetic, fn _value -> fixed_value end) end) |> Map.merge(scale_transforms) count_rows = length(group_values) group_values |> Enum.sort_by( fn row -> {row[geom_boxplot.mapping[:fill]], row[geom_boxplot.mapping[:color]], row[geom_boxplot.mapping[:alpha]]} end, :asc ) |> Enum.reduce({0, []}, fn row, {total_width, rects} -> box_left = position_adjust_x(geom_boxplot, row, group_index, total_width, plot) box_width = position_adjust_bar_width(geom_boxplot, count_rows) box_right = box_left + box_width box_middle = box_left + box_width / 2 { total_width + geom_boxplot.box_group_width / count_rows, [ Draw.rect( [ x: box_left, y: plot.area_padding + plot.width / plot.aspect_ratio - position_adjust_y(row, plot), width: box_width, height: (transforms.y.(row[:upper]) - transforms.y.(row[:lower])) / plot.aspect_ratio, fill: transforms.fill.(row[geom_boxplot.mapping[:fill]]), fill_opacity: transforms.alpha.(row[geom_boxplot.mapping[:alpha]]), stroke: transforms.color.(row[geom_boxplot.mapping[:color]]), stroke_width: 0.5 ] ++ GGity.Layer.custom_attributes(geom_boxplot, plot, row) ), Draw.line( x1: box_left, x2: box_right, y1: plot.area_padding + plot.width / plot.aspect_ratio - plot.scales.y.transform.(row[:middle]) / plot.aspect_ratio, y2: plot.area_padding + plot.width / plot.aspect_ratio - plot.scales.y.transform.(row[:middle]) / plot.aspect_ratio, stroke: transforms.color.(row[geom_boxplot.mapping[:color]]) ), Draw.line( x1: box_middle, x2: box_middle, y1: plot.area_padding + plot.width / plot.aspect_ratio - plot.scales.y.transform.(row[:upper]) / plot.aspect_ratio, y2: plot.area_padding + plot.width / plot.aspect_ratio - plot.scales.y.transform.(row[:ymax]) / plot.aspect_ratio, stroke: transforms.color.(row[geom_boxplot.mapping[:color]]), stroke_width: 0.5 ), Draw.line( x1: box_middle, x2: box_middle, y1: plot.area_padding + plot.width / plot.aspect_ratio - plot.scales.y.transform.(row[:lower]) / plot.aspect_ratio, y2: plot.area_padding + plot.width / plot.aspect_ratio - plot.scales.y.transform.(row[:ymin]) / plot.aspect_ratio, stroke: transforms.color.(row[geom_boxplot.mapping[:color]]), stroke_width: 0.5 ), for outlier <- row.outliers do draw_outlier(outlier, box_middle, row, geom_boxplot, transforms, plot) end | rects ] } end) |> elem(1) end defp draw_outlier(value, box_middle, row, geom_boxplot, transforms, plot) do y_coord = plot.area_padding + (200 - plot.scales.y.transform.(value)) / plot.aspect_ratio fill = geom_boxplot.outlier_fill || transforms.color.(row[geom_boxplot.mapping[:color]]) color = geom_boxplot.outlier_color || transforms.color.(row[geom_boxplot.mapping[:color]]) # This will break when we have fillable shapes case geom_boxplot.outlier_shape do :na -> [] :circle -> Shapes.draw(:circle, {box_middle, y_coord}, :math.pow(geom_boxplot.outlier_size, 2), fill: fill, color: color ) shape -> Shapes.draw(shape, {box_middle, y_coord}, :math.pow(geom_boxplot.outlier_size, 2), fill: fill, color: color ) end end defp position_adjust_x( %Geom.Boxplot{position: :dodge} = geom_boxplot, _row, group_index, total_width, plot ) do plot.area_padding + group_index * (geom_boxplot.box_group_width + plot.scales.x.padding) + total_width end defp position_adjust_y(row, plot) do plot.scales.y.transform.(row[:upper]) / plot.aspect_ratio end defp position_adjust_bar_width(%Geom.Boxplot{position: :dodge} = geom_box, count_rows) do geom_box.box_group_width / count_rows end end
lib/ggity/geom/boxplot.ex
0.924492
0.631822
boxplot.ex
starcoder
defmodule ConCache.Item do @moduledoc """ This struct can be used in place of naked values to set per-item TTL values. """ defstruct value: nil, ttl: :infinity @type t :: %ConCache.Item{ value: ConCache.value(), ttl: pos_integer | :infinity | :renew | :no_update } end defmodule ConCache do require Logger @moduledoc """ Implements an ETS based key/value storage with following additional features: - row level synchronized writes (inserts, read/modify/write updates, deletes) - TTL support - modification callbacks Example usage: ConCache.start_link(name: :my_cache, ttl_check_interval: false) ConCache.put(:my_cache, :foo, 1) ConCache.get(:my_cache, :foo) # 1 The following rules apply: - Modifications are by isolated per row. Two processes can't modify the same row at the same time. Dirty operations are available through `dirty_` equivalents. - Reads are dirty by default. You can use `isolated/4` to perform isolated custom operations. - Operations are always performed in the caller process. Custom lock implementation is used to ensure synchronism. See `README.md` for more details. - In this example, items don't expire. See `start_link/1` for details on how to setup expiry. See `start_link/1` for more details. """ alias ConCache.Owner alias ConCache.Operations defstruct [ :owner_pid, :ets, :ttl_manager, :ttl, :acquire_lock_timeout, :callback, :touch_on_read, :lock_pids ] @type t :: pid | atom | {:global, any} | {:via, atom, any} @type key :: any @type value :: any @type store_value :: value | ConCache.Item.t() @type callback_fun :: ({:update, pid, key, value} | {:delete, pid, key} -> any) @type ets_option :: :named_table | :compressed | {:heir, pid} | {:write_concurrency, boolean} | {:read_concurrency, boolean} | :ordered_set | :set | :bag | :duplicate_bag | {:name, atom} @type options :: [ {:name, atom} | {:global_ttl, non_neg_integer} | {:acquire_lock_timeout, pos_integer} | {:callback, callback_fun} | {:touch_on_read, boolean} | {:ttl_check_interval, non_neg_integer | false} | {:time_size, pos_integer} | {:ets_options, [ets_option]} ] @type update_fun :: (value -> {:ok, store_value} | {:error, any}) @type store_fun :: (() -> store_value) @doc """ Starts the server and creates an ETS table. Options: - `{:name, atom} - A name of the cache process.` - `{:ttl_check_interval, time_ms | false}` - Required. A check interval for TTL expiry. Provide a positive integer for expiry to work, or pass `false` to disable ttl checks. See below for more details on expiry. - `{:global_ttl, time_ms | :infinity}` - The time after which an item expires. When an item expires, it is removed from the cache. Updating the item extends its expiry time. - `{:touch_on_read, true | false}` - Controls whether read operation extends expiry of items. False by default. - `{:callback, callback_fun}` - If provided, this function is invoked __after__ an item is inserted or updated, or __before__ it is deleted. - `{:acquire_lock_timeout, timeout_ms}` - The time a client process waits for - `{:ets_options, [ets_option]` – The options for ETS process. the lock. Default is 5000. In addition, following ETS options are supported: - `:set` - An ETS table will be of the `:set` type (default). - `:ordered_set` - An ETS table will be of the `:ordered_set` type. - `:bag` - An ETS table will be of the `:bag` type. - `:duplicate_bag` - An ETS table will be of the `:duplicate_bag` type. - `:named_table` - `:name` - `:heir` - `:write_concurrency` - `:read_concurrency` ## Child specification To insert your cache into the supervision tree, pass the child specification in the shape of `{ConCache, con_cache_options}`. For example: ``` {ConCache, [name: :my_cache, ttl_check_interval: false]} ``` ## Expiry To configure expiry, you need to provide positive integer for the `:ttl_check_interval` option. This integer represents the millisecond interval in which the expiry is performed. You also need to provide the `:global_ttl` option, which represents the default TTl time for the item. TTL of each item is by default extended only on modifications. This can be changed with the `touch_on_read: true` option. If you need a granular control of expiry per each item, you can pass a `ConCache.Item` struct when storing data. If you don't want a modification of an item to extend its TTL, you can pass a `ConCache.Item` struct, with `:ttl` field set to `:no_update`. ### Choosing ttl_check_interval time When expiry is configured, the owner process works in discrete steps, doing cleanups every `ttl_check_interval` milliseconds. This approach allows the owner process to do fairly small amount of work in each discrete step. Assuming there's no huge system overload, an item's max lifetime is thus `global_ttl + ttl_check_interval` [ms], after the last item's update. Thus, a lower value of ttl_check_interval time means more frequent purging which may reduce your memory consumption, but could also cause performance penalties. Higher values put less pressure on processing, but item expiry is less precise. """ @spec start_link(options) :: Supervisor.on_start() def start_link(options) do options = Keyword.merge(options, ttl: options[:global_ttl], ttl_check: options[:ttl_check_interval]) with :ok <- validate_ttl(options[:ttl_check_interval], options[:global_ttl]) do Supervisor.start_link( [ {ConCache.LockSupervisor, System.schedulers_online()}, {Owner, options} ], [strategy: :one_for_all] ++ Keyword.take(options, [:name]) ) end end defp validate_ttl(false, nil), do: :ok defp validate_ttl(false, _global_ttl), do: raise( ArgumentError, "ConCache ttl_check_interval is false and global_ttl is set. Either remove your global_ttl or set ttl_check_interval to a time" ) defp validate_ttl(nil, _global_ttl), do: raise(ArgumentError, "ConCache ttl_check_interval must be supplied") defp validate_ttl(_ttl_check_interval, nil), do: raise(ArgumentError, "ConCache global_ttl must be supplied") defp validate_ttl(_ttl_check_interval, _global_ttl), do: :ok @doc false @spec child_spec(options) :: Supervisor.child_spec() def child_spec(opts) do %{ id: __MODULE__, start: {__MODULE__, :start_link, [opts]}, type: :supervisor } end @doc """ Returns the ets table managed by the cache. """ @spec ets(t) :: :ets.tab() def ets(cache_id), do: Operations.ets(Owner.cache(cache_id)) @doc """ Reads the item from the cache. A read is always "dirty", meaning it doesn't block while someone is updating the item under the same key. A read doesn't expire TTL of the item, unless `touch_on_read` option is set while starting the cache. """ @spec get(t, key) :: value def get(cache_id, key), do: Operations.get(Owner.cache(cache_id), key) @doc """ Stores the item into the cache. """ @spec put(t, key, store_value) :: :ok def put(cache_id, key, value), do: Operations.put(Owner.cache(cache_id), key, value) @doc """ Returns the number of items stored in the cache. """ @spec size(t) :: non_neg_integer def size(cache_id), do: Operations.size(Owner.cache(cache_id)) @doc """ Dirty equivalent of `put/3`. """ @spec dirty_put(t, key, store_value) :: :ok def dirty_put(cache_id, key, value), do: Operations.dirty_put(Owner.cache(cache_id), key, value) @doc """ Inserts the item into the cache unless it exists. """ @spec insert_new(t, key, store_value) :: :ok | {:error, :already_exists} def insert_new(cache_id, key, value), do: Operations.insert_new(Owner.cache(cache_id), key, value) @doc """ Dirty equivalent of `insert_new/3`. """ @spec dirty_insert_new(t, key, store_value) :: :ok | {:error, :already_exists} def dirty_insert_new(cache_id, key, value), do: Operations.insert_new(Owner.cache(cache_id), key, value) @doc """ Updates the item, or stores new item if it doesn't exist. The `update_fun` is invoked after the item is locked. Here, you can be certain that no other process will update this item, unless they are doing dirty updates or writing directly to the underlying ETS table. This function is not supported by `:bag` or `:duplicate_bag` ETS tables. The updater lambda must return one of the following: - `{:ok, value}` - causes the value to be stored into the table - `{:error, reason}` - the value won't be stored and `{:error, reason}` will be returned """ @spec update(t, key, update_fun) :: :ok | {:error, any} def update(cache_id, key, update_fun), do: Operations.update(Owner.cache(cache_id), key, update_fun) @doc """ Dirty equivalent of `update/3`. """ @spec dirty_update(t, key, update_fun) :: :ok | {:error, any} def dirty_update(cache_id, key, update_fun), do: Operations.dirty_update(Owner.cache(cache_id), key, update_fun) @doc """ Updates the item only if it exists. Otherwise works just like `update/3`. """ @spec update_existing(t, key, update_fun) :: :ok | {:error, :not_existing} | {:error, any} def update_existing(cache_id, key, update_fun), do: Operations.update_existing(Owner.cache(cache_id), key, update_fun) @doc """ Dirty equivalent of `update_existing/3`. """ @spec dirty_update_existing(t, key, update_fun) :: :ok | {:error, :not_existing} | {:error, any} def dirty_update_existing(cache_id, key, update_fun), do: Operations.dirty_update_existing(Owner.cache(cache_id), key, update_fun) @doc """ Deletes the item from the cache. """ @spec delete(t, key) :: :ok def delete(cache_id, key), do: Operations.delete(Owner.cache(cache_id), key) @doc """ Dirty equivalent of `delete/2`. """ @spec dirty_delete(t, key) :: :ok def dirty_delete(cache_id, key), do: Operations.dirty_delete(Owner.cache(cache_id), key) @doc """ Retrieves the item from the cache, or inserts the new item. If the item exists in the cache, it is retrieved. Otherwise, the lambda function is executed and its result is stored under the given key. This function is not supported by `:bag` and `:duplicate_bag` ETS tables. Note: if the item is already in the cache, this function amounts to a simple get without any locking, so you can expect it to be fairly fast. """ @spec get_or_store(t, key, store_fun) :: value def get_or_store(cache_id, key, store_fun), do: Operations.get_or_store(Owner.cache(cache_id), key, store_fun) @doc """ Dirty equivalent of `get_or_store/3`. """ @spec dirty_get_or_store(t, key, store_fun) :: value def dirty_get_or_store(cache_id, key, store_fun), do: Operations.dirty_get_or_store(Owner.cache(cache_id), key, store_fun) @doc """ Manually touches the item to prolongate its expiry. """ @spec touch(t, key) :: :ok def touch(cache_id, key), do: Operations.touch(Owner.cache(cache_id), key) @doc """ Isolated execution over arbitrary lock in the cache. You can do whatever you want in the function, not necessarily related to the cache. The return value is the result of the provided lambda. This allows you to perform flexible isolation. If you use the key of your item as a `key`, then this operation will be exclusive to updates. This can be used e.g. to perform isolated reads: # Process A: ConCache.isolated(:my_cache, :my_item_key, fn() -> ... end) # Process B: ConCache.update(:my_cache, :my_item, fn(old_value) -> ... end) These two operations are mutually exclusive. """ @spec isolated(t, key, nil | pos_integer, (() -> any)) :: any def isolated(cache_id, key, timeout \\ nil, fun), do: Operations.isolated(Owner.cache(cache_id), key, timeout, fun) @doc """ Similar to `isolated/4` except it doesn't wait for the lock to be available. If the lock can be acquired immediately, it will be acquired and the function will be invoked. Otherwise, an error is returned immediately. """ @spec try_isolated(t, key, nil | pos_integer, (() -> any)) :: {:error, :locked} | {:ok, any} def try_isolated(cache_id, key, timeout \\ nil, on_success), do: Operations.try_isolated(Owner.cache(cache_id), key, timeout, on_success) end
lib/con_cache.ex
0.916407
0.628721
con_cache.ex
starcoder
defmodule Brando.MigrationTest.Project do use Brando.Blueprint, application: "Brando", domain: "Projects", schema: "Project", singular: "project", plural: "projects" trait(Brando.Trait.Creator) trait(Brando.Trait.SoftDelete) trait(Brando.Trait.Sequenced) trait(Brando.Trait.Timestamped) trait(Brando.Trait.Translatable) attributes do attribute(:title, :string) attribute(:status, :status, required: true) attribute(:slug, :slug, from: :title, required: true, unique: [prevent_collision: :language]) attribute(:data, :villain) end assets do asset(:cover, :image, cfg: [ allowed_mimetypes: ["image/jpeg", "image/png", "image/gif"], upload_path: Path.join("images", "avatars"), random_filename: true, size_limit: 10_240_000, sizes: %{"micro" => %{"size" => "25", "quality" => 10, "crop" => false}}, srcset: [{"small", "300w"}, {"medium", "500w"}, {"large", "700w"}] ] ) end relations do relation(:properties, :embeds_many, module: Brando.MigrationTest.Property) end end defmodule Brando.MigrationTest.ProjectUpdate1 do use Brando.Blueprint, application: "Brando", domain: "Projects", schema: "Project", singular: "project", plural: "projects" trait(Brando.Trait.Creator) trait(Brando.Trait.Meta) trait(Brando.Trait.Sequenced) trait(Brando.Trait.Timestamped) trait(Brando.Trait.Translatable) attributes do attribute(:title, :string) attribute(:status, :status, required: true) attribute(:slug, :slug, from: :title, required: true, unique: [prevent_collision: :language]) attribute(:summary, :text) attribute(:unique_hash, :text, unique: true) attribute(:data, :villain) end assets do asset(:cover, :image, cfg: [ allowed_mimetypes: ["image/jpeg", "image/png", "image/gif"], default_size: "medium", upload_path: Path.join("images", "avatars"), random_filename: true, size_limit: 10_240_000, sizes: %{"micro" => %{"size" => "25", "quality" => 10, "crop" => false}}, srcset: [{"small", "300w"}, {"medium", "500w"}, {"large", "700w"}] ] ) end relations do relation(:properties, :embeds_many, module: Brando.MigrationTest.Property) relation(:more_properties, :embeds_many, module: Brando.MigrationTest.Property) end end defmodule Brando.MigrationTest.ProjectUpdate2 do use Brando.Blueprint, application: "Brando", domain: "Projects", schema: "Project", singular: "project", plural: "projects" trait(Brando.Trait.Sequenced) trait(Brando.Trait.Timestamped) trait(Brando.Trait.Translatable) attributes do attribute(:summary, :text) attribute(:unique_hash, :text, unique: true) attribute(:data, :villain) end end defmodule Brando.MigrationTest.Property do use Brando.Blueprint, application: "Brando", domain: "Projects", schema: "Property", singular: "property", plural: "properties" data_layer(:embedded) attributes do attribute(:key, :string) attribute(:value, :string) end end defmodule Brando.MigrationTest.Profile do use Brando.Blueprint, application: "Brando", domain: "Persons", schema: "Person", singular: "profiles", plural: "profile" trait(Brando.Trait.Creator) trait(Brando.Trait.SoftDelete) trait(Brando.Trait.Sequenced) trait(Brando.Trait.Timestamped) primary_key(:uuid) attributes do attribute(:status, :string) end end defmodule Brando.MigrationTest.Person do use Brando.Blueprint, application: "Brando", domain: "Persons", schema: "Person", singular: "person", plural: "persons" trait(Brando.Trait.Creator) trait(Brando.Trait.SoftDelete) trait(Brando.Trait.Sequenced) trait(Brando.Trait.Timestamped) trait(Brando.Trait.Translatable) primary_key(:uuid) attributes do attribute(:name, :string) attribute(:email, :string, required: true) end relations do relation(:profile, :belongs_to, module: Brando.MigrationTest.Profile, type: :binary_id) end end
test/support/blueprints/migration_schemas.ex
0.576542
0.422386
migration_schemas.ex
starcoder
defmodule Commanded.Aggregate.Multi.BankAccount do defstruct [ account_number: nil, balance: 0, state: nil, ] alias Commanded.Aggregate.Multi alias Commanded.Aggregate.Multi.BankAccount defmodule Commands do defmodule OpenAccount, do: defstruct [:account_number, :initial_balance] defmodule WithdrawMoney, do: defstruct [:account_number, :transfer_uuid, :amount] end defmodule Events do defmodule BankAccountOpened, do: defstruct [:account_number, :balance] defmodule MoneyWithdrawn, do: defstruct [:account_number, :transfer_uuid, :amount, :balance] end alias Commands.{OpenAccount,WithdrawMoney} alias Events.{BankAccountOpened,MoneyWithdrawn} def execute( %BankAccount{state: nil}, %OpenAccount{account_number: account_number, initial_balance: initial_balance}) when is_number(initial_balance) and initial_balance > 0 do %BankAccountOpened{account_number: account_number, balance: initial_balance} end def execute( %BankAccount{state: :active} = account, %WithdrawMoney{amount: amount}) when is_number(amount) and amount > 0 do account |> Multi.new() |> Multi.execute(&withdraw_money(&1, amount)) |> Multi.execute(&check_balance/1) end # state mutatators def apply( %BankAccount{} = state, %BankAccountOpened{account_number: account_number, balance: balance}) do %BankAccount{state | account_number: account_number, balance: balance, state: :active, } end def apply(%BankAccount{} = state, %MoneyWithdrawn{balance: balance}), do: %BankAccount{state | balance: balance} # private helpers defp withdraw_money(%BankAccount{account_number: account_number, balance: balance}, amount) do %MoneyWithdrawn{ account_number: account_number, amount: amount, balance: balance - amount } end defp check_balance(%BankAccount{balance: balance}) when balance < 0 do {:error, :insufficient_funds_available} end defp check_balance(%BankAccount{}), do: [] end
test/aggregates/support/multi_bank_account.ex
0.576304
0.549338
multi_bank_account.ex
starcoder
defmodule Appsignal.Span do alias Appsignal.{Config, Nif, Span} defstruct [:reference, :pid] @nif Application.get_env(:appsignal, :appsignal_tracer_nif, Appsignal.Nif) def create_root(namespace, pid) do if Config.active?() do {:ok, reference} = @nif.create_root_span(namespace) %Span{reference: reference, pid: pid} end end def create_root(namespace, pid, start_time) do if Config.active?() do sec = :erlang.convert_time_unit(start_time, :native, :second) nsec = :erlang.convert_time_unit(start_time, :native, :nanosecond) - sec * 1_000_000_000 {:ok, reference} = @nif.create_root_span_with_timestamp(namespace, sec, nsec) %Span{reference: reference, pid: pid} end end def create_child(%Span{reference: parent}, pid) do if Config.active?() do {:ok, reference} = @nif.create_child_span(parent) %Span{reference: reference, pid: pid} end end def create_child(%Span{reference: parent}, pid, start_time) do if Config.active?() do sec = :erlang.convert_time_unit(start_time, :native, :second) nsec = :erlang.convert_time_unit(start_time, :native, :nanosecond) - sec * 1_000_000_000 {:ok, reference} = @nif.create_child_span_with_timestamp(parent, sec, nsec) %Span{reference: reference, pid: pid} end end def set_name(%Span{reference: reference} = span, name) when is_reference(reference) and is_binary(name) do if Config.active?() do :ok = @nif.set_span_name(reference, name) span end end def set_name(_span, _name), do: nil def set_namespace(%Span{reference: reference} = span, namespace) when is_binary(namespace) do :ok = @nif.set_span_namespace(reference, namespace) span end def set_namespace(_span, _name), do: nil def set_attribute(%Span{reference: reference} = span, key, true) when is_binary(key) do :ok = Nif.set_span_attribute_bool(reference, key, 1) span end def set_attribute(%Span{reference: reference} = span, key, false) when is_binary(key) do :ok = Nif.set_span_attribute_bool(reference, key, 0) span end def set_attribute(%Span{reference: reference} = span, key, value) when is_binary(key) and is_binary(value) do :ok = Nif.set_span_attribute_string(reference, key, value) span end def set_attribute(%Span{reference: reference} = span, key, value) when is_binary(key) and is_integer(value) do :ok = Nif.set_span_attribute_int(reference, key, value) span end def set_attribute(%Span{reference: reference} = span, key, value) when is_binary(key) and is_float(value) do :ok = Nif.set_span_attribute_double(reference, key, value) span end def set_attribute(_span, _key, _value), do: nil def set_sql(%Span{reference: reference} = span, body) when is_binary(body) do :ok = Nif.set_span_attribute_sql_string(reference, "appsignal:body", body) span end def set_sql(_span, _body), do: nil def set_sample_data(%Span{reference: reference} = span, key, value) when is_binary(key) and is_map(value) do data = value |> Appsignal.Utils.MapFilter.filter() |> Appsignal.Utils.DataEncoder.encode() :ok = Nif.set_span_sample_data(reference, key, data) span end def set_sample_data(_span, _key, _value), do: nil def add_error(span, kind, reason, stacktrace) do {name, message, formatted_stacktrace} = Appsignal.Error.metadata(kind, reason, stacktrace) do_add_error(span, name, message, formatted_stacktrace) end def add_error(span, %_{__exception__: true} = exception, stacktrace) do {name, message, formatted_stacktrace} = Appsignal.Error.metadata(exception, stacktrace) do_add_error(span, name, message, formatted_stacktrace) end def do_add_error(%Span{reference: reference} = span, name, message, stacktrace) do if Config.active?() do :ok = @nif.add_span_error( reference, name, message, Appsignal.Utils.DataEncoder.encode(stacktrace) ) span end end def do_add_error(nil, _name, _message, _stacktrace), do: nil def close(%Span{reference: reference} = span) do :ok = @nif.close_span(reference) span end def close(nil), do: nil def close(%Span{reference: reference} = span, end_time) do sec = :erlang.convert_time_unit(end_time, :native, :second) nsec = :erlang.convert_time_unit(end_time, :native, :nanosecond) - sec * 1_000_000_000 :ok = @nif.close_span_with_timestamp(reference, sec, nsec) span end def close(nil, _end_time), do: nil def to_map(%Span{reference: reference}) do {:ok, json} = Nif.span_to_json(reference) Appsignal.Json.decode!(json) end end
lib/appsignal/span.ex
0.528047
0.40116
span.ex
starcoder
defmodule Kayrock.MessageSet do @moduledoc """ Represents a set of messages with the v0 or v1 format This is the old format that KafkaEx supported See https://kafka.apache.org/documentation/#recordbatch """ defmodule Message do @moduledoc """ Represents a single message with the v0 or v1 format This is the old format that KafkaEx supported See https://kafka.apache.org/documentation/#recordbatch """ defstruct offset: nil, compression: :none, key: nil, value: nil, attributes: nil, crc: nil, timestamp: nil, timestamp_type: nil @type t :: %__MODULE__{} end use Bitwise defstruct messages: [], magic: 0 @type t :: %__MODULE__{} @spec serialize(t) :: iodata def serialize(%__MODULE__{messages: messages}) when is_list(messages) do [%Message{compression: compression} | _] = messages # note when we serialize we never have an offset {message, msize} = create_message_set(messages, compression) [<<msize::32-signed>>, message] end @spec deserialize(binary) :: t def deserialize(data), do: deserialize(data, 0) def deserialize(data, magic) do msgs = do_deserialize(data, [], 0) %__MODULE__{messages: msgs, magic: magic} end defp do_deserialize( <<offset::64-signed, msg_size::32-signed, msg::size(msg_size)-binary, orig_rest::bits>>, acc, add_offset ) do <<crc::32, magic::8-signed, attributes::8-signed, rest::bits>> = msg {timestamp, rest} = case magic do 0 -> {nil, rest} 1 -> Kayrock.Deserialize.deserialize(:int64, rest) end {key, rest} = deserialize_string(rest) {value, <<>>} = deserialize_string(rest) msg = case compression_from_attributes(attributes) do 0 -> timestamp_type = timestamp_type_from_attributes(attributes, magic) %Message{ offset: offset + add_offset, crc: crc, attributes: attributes, key: key, value: value, timestamp: timestamp, timestamp_type: timestamp_type } c -> decompressed = Kayrock.Compression.decompress(c, value) if magic == 1 do Enum.reverse(do_deserialize(decompressed, [], offset)) else Enum.reverse(do_deserialize(decompressed, [], 0)) end end do_deserialize(orig_rest, [msg | acc], add_offset) end defp do_deserialize(_, acc, _add_offset) do Enum.reverse(List.flatten(acc)) end defp create_message_set([], _compression_type), do: {"", 0} defp create_message_set(messages, :none) do create_message_set_uncompressed(messages) end defp create_message_set(messages, compression_type) do alias Kayrock.Compression {message_set, _} = create_message_set(messages, :none) {compressed_message_set, attribute} = Compression.compress(compression_type, message_set) {message, msize} = create_message(compressed_message_set, nil, attribute) {[<<0::64-signed>>, <<msize::32-signed>>, message], 8 + 4 + msize} end defp create_message_set_uncompressed([ %Message{key: key, value: value} | messages ]) do {message, msize} = create_message(value, key) message_set = [<<0::64-signed>>, <<msize::32-signed>>, message] {message_set2, ms2size} = create_message_set(messages, :none) {[message_set, message_set2], 8 + 4 + msize + ms2size} end defp create_message(value, key, attributes \\ 0) do {bkey, skey} = bytes(key) {bvalue, svalue} = bytes(value) sub = [<<0::8, attributes::8-signed>>, bkey, bvalue] crc = :erlang.crc32(sub) {[<<crc::32>>, sub], 4 + 2 + skey + svalue} end # the 3 lsb specifies compression defp compression_from_attributes(a), do: a &&& 7 defp timestamp_type_from_attributes(a, 1), do: a &&& 8 defp timestamp_type_from_attributes(_, _), do: nil defp deserialize_string(<<-1::32-signed, rest::bits>>), do: {nil, rest} defp deserialize_string(<<str_size::32-signed, str::size(str_size)-binary, rest::bits>>), do: {str, rest} defp bytes(nil), do: {<<-1::32-signed>>, 4} defp bytes(data) do case :erlang.iolist_size(data) do 0 -> {<<0::32>>, 4} size -> {[<<size::32>>, data], 4 + size} end end end
lib/kayrock/message_set.ex
0.804675
0.460532
message_set.ex
starcoder
defmodule BinFormat.FieldType.IpAddr do defstruct name: nil, default: nil, options: [] @moduledoc """ :inet style IP address field type for defformat. """ @doc """ Add an IP address field to the format structure in defformat. This field type decodes IP addresses represented as 32 bit integers in binaries directly into the tuple format used by the networking modules in the Erlang standard library. For example, it will transform the binary value `<< 192, 168, 1, 1 >>` into `{192, 168, 1, 1}` and vice versa. By default the binary value will be treated as big endian. To use a little endian encoding add the atom `:little` to the list of options. """ defmacro ip_addr(name, default, options \\ []) do field = quote do %BinFormat.FieldType.IpAddr{name: unquote(name), default: unquote(default), options: unquote(options)} end quote do BinFormat.FieldType.Util.add_field(unquote(field), __ENV__) end end end defimpl BinFormat.Field, for: BinFormat.FieldType.IpAddr do alias BinFormat.FieldType.IpAddr, as: IpAddr defp struct_pattern(%IpAddr{name: name}, module, prefix) do a_name = full_name(name, "_ip_a", prefix) b_name = full_name(name, "_ip_b", prefix) c_name = full_name(name, "_ip_c", prefix) d_name = full_name(name, "_ip_d", prefix) pattern = quote do {unquote(name), {unquote(Macro.var(a_name, module)), unquote(Macro.var(b_name, module)), unquote(Macro.var(c_name, module)), unquote(Macro.var(d_name, module))}} end {:ok, pattern} end defp bin_pattern(%IpAddr{name: name, options: options}, module, prefix) do a_name = Macro.var(full_name(name, "_ip_a", prefix), module) b_name = Macro.var(full_name(name, "_ip_b", prefix), module) c_name = Macro.var(full_name(name, "_ip_c", prefix), module) d_name = Macro.var(full_name(name, "_ip_d", prefix), module) little_endian = Enum.member?(options, :little) pattern = if little_endian do quote do <<unquote(d_name), unquote(c_name), unquote(b_name), unquote(a_name)>> end else quote do <<unquote(a_name), unquote(b_name), unquote(c_name), unquote(d_name)>> end end {:ok, pattern} end defp full_name(name, arg, prefix) do String.to_atom(prefix <> arg <> Atom.to_string(name)) end def struct_definition(%IpAddr{name: name, default: default}, _module) do BinFormat.FieldType.Util.standard_struct_def(name, default) end def struct_match_pattern(fields, module, prefix) do struct_pattern(fields, module, prefix) end def struct_build_pattern(fields, module, prefix) do struct_pattern(fields, module, prefix) end def bin_match_pattern(fields, module, prefix) do bin_pattern(fields, module, prefix) end def bin_build_pattern(fields, module, prefix) do bin_pattern(fields, module, prefix) end end
lib/bin_format/field_type/ip_addr.ex
0.744099
0.537891
ip_addr.ex
starcoder
defmodule Twitter.Clock do @moduledoc """ Helper functions to deal with time """ @type t :: :calendar.datetime @type time :: :calendar.time @doc """ Returns the current local time """ @spec now :: t def now do :calendar.local_time end @doc """ Returns the current local time of yesterday """ @spec yesterday :: t def yesterday do {date, time} = :calendar.local_time days = :calendar.date_to_gregorian_days(date) date = :calendar.gregorian_days_to_date(days - 1) {date, time} end @doc """ Returns the elapsed time between two datetimes in a human readable form iex> {date, _} = Twitter.Clock.now iex> Twitter.Clock.format_elapsed_time({date, {15, 7, 10}}, {date, {15, 7, 12}}) "2 seconds ago" """ @spec format_elapsed_time(t, t) :: String.t def format_elapsed_time(at, now) do format_elapsed_time(:calendar.time_difference(at, now)) end defp format_elapsed_time({0, {0, 0, 0}}), do: "just now" defp format_elapsed_time({0, {0, 0, 1}}), do: "1 second ago" defp format_elapsed_time({0, {0, 1, _}}), do: "1 minute ago" defp format_elapsed_time({0, {1, _, _}}), do: "1 hour ago" defp format_elapsed_time({0, {0, 0, n}}), do: "#{n} seconds ago" defp format_elapsed_time({0, {0, n, _}}), do: "#{n} minutes ago" defp format_elapsed_time({0, {n, _, _}}), do: "#{n} hours ago" defp format_elapsed_time({1, _}), do: "1 day ago" defp format_elapsed_time({n, _}), do: "#{n} days ago" @doc """ Returns today datetime at given time """ @spec at(time) :: t def at({h, m, s}) do {date, _} = now {date, {h, m, s}} end @doc """ Adds or subtracts an amount of time to the given datetime Add `n` `units` of time with `{:after, n, units}`. Subtract with `{:before, n, units}` iex> Clock.at({{2015, 7, 19}, {15, 7, 43}}, {:after, 20, :seconds}) {{2015, 7, 19}, {15, 8, 3}} """ @spec at(t, {:after, pos_integer, timeunit} | {:before, pos_integer, timeunit}) :: t when timeunit: :second | :seconds | :minute | :minutes | :hour | :hours | :day | :days def at(clock, {:after, n, units}) do :calendar.gregorian_seconds_to_datetime(seconds_in(clock) + seconds_in(n, units)) end def at(clock, {:before, n, units}) do :calendar.gregorian_seconds_to_datetime(seconds_in(clock) - seconds_in(n, units)) end defp seconds_in(datetime), do: :calendar.datetime_to_gregorian_seconds(datetime) defp seconds_in(n, :second), do: n defp seconds_in(n, :seconds), do: n defp seconds_in(n, :minute), do: n * 60 defp seconds_in(n, :minutes), do: n * 60 defp seconds_in(n, :hour), do: n * 3600 defp seconds_in(n, :hours), do: n * 3600 defp seconds_in(n, :day), do: n * 86400 defp seconds_in(n, :days), do: n * 86400 end
lib/clock.ex
0.703651
0.419737
clock.ex
starcoder
defmodule Unleash.Strategy do @moduledoc """ Used to extend the client and create custom strategies. To do so, `use` this module within your custom strategy and implmenent `c:enabled?/2`. Provide a name that is human-readable, as it is logged. ```elixir defmodule MyApp.CustomStrategy use Unleash.Strategy, name: "CustomStrategy" def enabled?(_params, _context), do: true end ``` """ require Logger alias Unleash.Config alias Unleash.Strategy.Constraint defmacro __using__(opts) do name = opts[:name] quote line: true do require Logger alias unquote(__MODULE__) @behaviour unquote(__MODULE__) @name unquote(name) @doc false def check_enabled(params \\ %{}, context) do params |> enabled?(context) |> Strategy.log_result(@name) end end end @doc """ You can implmenet this callback a couple of ways, returning a bare `boolean()` or a `{boolean, map()}`. The latter is preferred, as it generates a `:debug` level log entry detailing the name of the strategy, the result, and the contents of `map()`, in an effort to help understand why the result was what it was. ## Arguments * `parameters` - A map of paramters returned from the Unleash server. This can be whatever you like, such as a configured list of `userIds`. * `context` - The context passed into `Unleash.enabled?/3`. ## Examples ```elixir @behaviour Unleash.Strategy def enabled?(params, context), do: {false, params} def enabled(params, %{}), do: false ``` """ @callback enabled?(parameters :: map(), context :: Unleash.context()) :: boolean() | {boolean(), map()} @doc false def enabled?(%{"name" => name} = strategy, context) do {_name, module} = Config.strategies() |> Enum.find(fn {n, _mod} -> n == name end) check_constraints(strategy, context) and module.check_enabled(strategy["parameters"], context) end def enabled?(_strat, _context), do: false @spec log_result({boolean(), map()}, String.t()) :: boolean() def log_result({result, opts}, name) when is_map(opts) do Logger.debug(fn -> opts |> Stream.map(fn {k, v} -> "#{k}: #{v}" end) |> Enum.join(", ") |> (&"#{name} computed #{result} from #{&1}").() end) result end @spec log_result(boolean(), String.t()) :: boolean() def log_result(result, name) when is_boolean(result) do Logger.debug(fn -> "#{name} computed #{result}" end) result end defp check_constraints(%{"constraints" => constraints}, context), do: Constraint.verify_all(constraints, context) defp check_constraints(_strategy, _context), do: true end
lib/unleash/strategy.ex
0.882212
0.70005
strategy.ex
starcoder
defmodule TimeZoneInfo.ExternalTermFormat do @moduledoc false # Encodes and decodes the `TimeZoneInfo.data`. @doc """ Encodes `TimeZoneInfo.data` to binary. """ @spec encode(TimeZoneInfo.data()) :: {:ok, binary()} | {:error, atom()} def encode(term) do with {:ok, term} <- validate(term) do binary = :erlang.term_to_binary(term, compressed: 9) {:ok, binary} end end @doc """ Decodes `TimeZoneInfo.data` from binary. """ @spec decode(binary()) :: {:ok, TimeZoneInfo.data()} | {:error, atom()} def decode(binary) do binary |> :erlang.binary_to_term([:safe]) |> validate() rescue _error -> {:error, :decode_fails} end @doc """ Returns a checksum for the `binary` or `TimeZoneInfo.data`. """ @spec checksum(TimeZoneInfo.data() | binary()) :: {:ok, String.t()} | {:error, term()} def checksum(data) when is_map(data) do {:ok, data |> :erlang.phash2() |> to_string()} end def checksum(data) when is_binary(data) do with {:ok, term} <- decode(data) do checksum(term) end end # These functions validate the decoded data. The code ensures also that all # needed `atoms` are available that `:erlang.binary_to_term(binary, [:safe])` # raises not an error. # This could still be replaced by Xema. defp validate(term) when is_map(term) do with :ok <- validate(:keys, term), :ok <- validate(:links, term), :ok <- validate(:version, term), :ok <- validate(:config, term), :ok <- validate(:rules, term), :ok <- validate(:time_zones, term) do {:ok, term} end end defp validate(_term), do: {:error, :invalid_data} defp validate(:keys, term) do term |> Map.keys() |> Enum.sort() |> Kernel.==([:config, :links, :rules, :time_zones, :version]) |> check(:invalid_keys) end defp validate(:version, term) do term |> Map.get(:version) |> is_binary() |> check(:invalid_version) end defp validate(:config, %{config: config}) do with :ok <- validate(:config_time_zones, config[:time_zones]), :ok <- validate(:config_lookahead, config[:lookahead]), :ok <- validate(:config_files, config[:files]) do :ok end end defp validate(:config_time_zones, :all), do: :ok defp validate(:config_time_zones, list) when is_list(list) do list |> Enum.all?(fn time_zone -> is_binary(time_zone) end) |> check({:invalid_config, [time_zones: list]}) end defp validate(:config_time_zones, value), do: {:error, {:invalid_config, [time_zones: value]}} defp validate(:config_lookahead, lookahead) when is_integer(lookahead) and lookahead > 0, do: :ok defp validate(:config_lookahead, value), do: {:error, {:invalid_config, [lookahead: value]}} defp validate(:config_files, list) when is_list(list) do list |> Enum.all?(fn time_zone -> is_binary(time_zone) end) |> check({:invalid_config, [files: list]}) end defp validate(:config_files, value), do: {:error, {:invalid_config, [files: value]}} defp validate(:links, term) do term |> Map.get(:links) |> Enum.all?(fn {from, to} when is_binary(from) and is_binary(to) -> true _ -> false end) |> check(:invalid_links) end defp validate(:time_zones, term) do term |> Map.get(:time_zones) |> Enum.all?(fn {name, zone_states} when is_binary(name) -> validate(:zone_states, zone_states) _ -> false end) |> check(:invalid_time_zones) end defp validate(:zone_states, zone_states) do Enum.all?(zone_states, fn {at, {utc_offset, std_offset, zone_abbr, wall_period}} when at >= 0 -> validate(:zone_state, {utc_offset, std_offset, zone_abbr, wall_period}) {at, zone_rule} when at >= 0 -> validate(:zone_rule, zone_rule) _ -> false end) end defp validate(:zone_state, {utc_offset, std_offset, zone_abbr, {since, until}}) do zone_state = is_integer(utc_offset) && is_integer(std_offset) && is_binary(zone_abbr) since = since == :min || match?(%NaiveDateTime{}, since) until = until == :max || match?(%NaiveDateTime{}, until) zone_state && since && until end defp validate(:zone_rule, {utc_offset, rules, format}) when is_integer(utc_offset) and is_binary(rules) and is_tuple(format) do validate(:format, format) end defp validate(:zone_state, _), do: false defp validate(:format, format) do case format do {:choice, [one, two]} when is_binary(one) and is_binary(two) -> true {:template, template} when is_binary(template) -> true {:string, string} when is_binary(string) -> true _ -> false end end # {{10, [last_day_of_week: 7], 1, 0, 0}, :utc, 0, nil}, # {{3, [last_day_of_week: 7], 1, 0, 0}, :utc, 3600, "S"} defp validate(:rules, term) do term |> Map.get(:rules) |> Enum.all?(fn {name, rule_set} when is_binary(name) -> validate(:rule_set, rule_set) _ -> false end) |> check(:invalid_rules) end defp validate(:rule_set, rule_set) when is_list(rule_set) do Enum.all?(rule_set, fn rule -> validate(:rule, rule) end) end defp validate(:rule_set, _), do: false defp validate(:rule, {at, time_standard, utc_offset, letters}) when is_tuple(at) and time_standard in [:wall, :standard, :gmt, :utc, :zulu] and is_integer(utc_offset) and (is_binary(letters) or is_nil(letters)) do validate(:at, at) end defp validate(:rule, _), do: false defp validate(:at, {month, day, {hour, minute, second}}) when is_integer(month) and is_integer(hour) and is_integer(minute) and is_integer(second) do validate(:day, day) end defp validate(:at, _), do: false defp validate(:day, day) when is_integer(day), do: true defp validate(:day, day) when is_list(day) do case day do [last_day_of_week: day] when is_integer(day) -> true [day: day, op: op, day_of_week: day_of_week] when is_integer(day) and op in [:le, :ge] and is_integer(day_of_week) -> true _ -> false end end defp validate(:day, _), do: false defp check(true, _), do: :ok defp check(false, error), do: {:error, error} end
lib/time_zone_info/external_term_format.ex
0.884558
0.598752
external_term_format.ex
starcoder
defmodule Nixa.Shared do @moduledoc """ Utility functions """ import Nx.Defn @empty_mapset MapSet.new() @doc """ Provide function guard to check for empty MapSet """ defguard empty_mapset?(some_map_set) when some_map_set == @empty_mapset @doc """ Provide simple log2 function """ defn log2(x) do Nx.log(x) / Nx.log(2.0) end @doc """ Provide simple log10 function """ defn log10(x) do Nx.log(x) / Nx.log(10.0) end @doc """ Given a set of values, identify all unique categories and return them """ def get_categories(vals) do vals = if is_list(Enum.fetch!(vals, 0)), do: vals, else: Enum.map(vals, fn v -> [v] end) cats = vals |> Enum.fetch!(0) |> Enum.map(fn _ -> MapSet.new() end) vals |> Enum.reduce(cats, fn i, acc -> update_categories_mapset(i, acc) end) |> Enum.map(fn ms -> MapSet.to_list(ms) end) end defp update_categories_mapset(i, acc) do acc |> Enum.zip(i) |> Enum.map(fn {s, val} -> MapSet.put(s, val) end) end @doc """ Given a set of values and the category mappings identified by get_categories/1, transform to numerical IDs """ def categorical_to_numeric([], _cats), do: [] def categorical_to_numeric([input | rest], cats) do numval = cats |> Enum.zip(input) |> Enum.map(fn {cs, val} -> Enum.find_index(cs, fn x -> x == val end) end) [numval | categorical_to_numeric(rest, cats)] end @doc """ Provide basic linspace functionality, similar to numpy.linspace """ def linspace(start, stop, num, opts \\ []) do start = safe_to_tensor(start) stop = safe_to_tensor(stop) num = safe_to_scalar(num) endpoint = Keyword.get(opts, :endpoint, true) step = if endpoint, do: calc_linspace_step_size(start, stop, Nx.subtract(num, 1)), else: calc_linspace_step_size(start, stop, num) values = for x <- 0..num-1, do: calc_linspace_step(start, step, x) Nx.stack(values) end defnp calc_linspace_step_size(start, stop, num) do Nx.abs(start - stop) / num end defnp calc_linspace_step(start, step, x) do start + step * x end @doc """ Get the most common class from targets """ def get_argmax_target(targets) do t_targets = targets |> Nx.concatenate() argmax = t_targets |> frequencies() |> Nx.argmax() |> Nx.to_scalar() t_targets |> Nx.to_flat_list() |> MapSet.new() |> MapSet.to_list() |> Enum.fetch!(argmax) end @doc """ Get the mean value of all targets """ def get_mean_target(targets) do targets |> Nx.concatenate() |> Nx.mean() |> Nx.new_axis(0) end @doc """ Calculate the frequencies of values in a tensor """ def frequencies(%Nx.Tensor{} = t) do t |> Nx.to_flat_list() |> MapSet.new() |> MapSet.to_list() |> Nx.tensor() |> Nx.map(fn c -> Nx.equal(t, c) |> Nx.sum() end) end @doc """ Provide Nx.tensor/2 functionality that is type-aware and can be blindly used on Nx.Tensor and scalars """ def safe_to_tensor(%Nx.Tensor{} = t, _opts), do: t def safe_to_tensor(t, opts), do: Nx.tensor(t, opts) def safe_to_tensor(%Nx.Tensor{} = t), do: t def safe_to_tensor(t), do: Nx.tensor(t) @doc """ Provide Nx.to_scalar/1 functionality that is type-aware and can be blindly used on Nx.Tensor and scalars """ def safe_to_scalar(%Nx.Tensor{} = t), do: Nx.to_scalar(t) def safe_to_scalar(t), do: t end
lib/nixa/shared.ex
0.748995
0.549701
shared.ex
starcoder
defmodule JaNestedParams do @moduledoc """ Parse `Json Api v1.0` parameters. Additionally, deviate from the specification by allowing the building of nested parameters, just like those consumed by Ecto's changeset at initialization. Make use of the `included` array of the Json Api specification. This does not conform to spec beacuse, as of v1.0, it does not expect compound documents traveling from client side back to server. More over, leverage the `meta` object, within `resource linkage` and `resource object`, to allow the inclusion of new resources in the `included` array. These new resources won't have an `id`, but still need to be distinguishable among other new resources of the same type. The `meta` object is expected to contain a member called `new-resource-tag` whenever an object doesn't have an id yet. The value of `new-resource-tag` must be unique within a group of new resources of the same type. If the primary data is not an array, but a single resource, no `id` member is needed if said resource is new, as provided by the specification. Nor anything needs to be said about it in the `meta` object. """ import Enum, only: [with_index: 1] @doc """ Accept an Json Api-ish document that may have an `included` array, which may also contain new resources that conform to the clauses explained above. """ def to_attributes(%{"data" => data, "included" => included}) when is_list(included) do as_attributes(data, included) end def to_attributes(%{"data" => data, "included" => _}), do: as_attributes(data, []) def to_attributes(%{"data" => data}), do: as_attributes(data, []) def to_attributes(data) when is_map(data), do: as_attributes(data, []) defp primary_linkage_key(%{"type" => _, "id" => _} = resource) do resource |> Map.take(["type", "id"]) |> linkage_key() end defp primary_linkage_key(%{"type" => type} ), do: {type,"",1} defp linkage_key(%{"meta" => %{"new-resource-tag" => newTag }, "type" => type} = resource) when not is_nil(newTag) do case resource do %{"id" => nil} -> nil %{"id" => ""} -> nil %{"id" => id} -> {type,id,0} _ -> {type,"",newTag} end end defp linkage_key(%{"type" => _, "id" => nil}), do: nil defp linkage_key(%{"type" => type, "id" => id}), do: {type,id,0} defp linkage_key(_), do: nil defp add_id_member(struct, %{"id" => id}), do: Map.put_new(struct, "id", id) defp add_id_member(struct, _), do: struct defp as_attributes(data, included) do id_key = primary_linkage_key data includedSet = Map.new included, &({linkage_key(&1), &1}) { object , _ } = repeat id_key, data , MapSet.new(), includedSet object end defp follow_many(name,linkages,accum,included) do { branches, idList, newAccum } = Enum.reduce linkages, {[], [], accum}, fn(identifier, {many,ids,acc})-> with {_, keyId, _} = newKey <- linkage_key(identifier) do case included do %{^newKey => something } -> {newBranch, oneMore} = repeat(newKey, something, acc, included) { [newBranch | many] , (if keyId == "", do: ids, else: [keyId | ids]) , oneMore } _ -> { many, (if keyId == "", do: ids, else: [keyId | ids]) , MapSet.put(acc, newKey) } end else _ -> { many, ids, acc } end end { [ {name, Map.new(for {a, b} <- with_index(branches), do: {b, a})}, {"#{name}_ids", idList} ] , newAccum } end defp follow_one(name,identifier,accum,included) do with {_, keyId, _} = newKey <- linkage_key(identifier) do case included do %{^newKey => something} -> {newBranch, oneMore} = repeat(newKey, something, accum, included) { [ {name, newBranch} ] ++ (if keyId == "", do: [], else: [{"#{name}_id", keyId}]), oneMore } _ -> { (if keyId == "", do: [], else: [{"#{name}_id", keyId}]), MapSet.put(accum,newKey) } end else _ -> { [], accum } end end defp repeat(key, branch, visited, included) do visitedAndMe = MapSet.put(visited, key) { params, increasedVisits } = case {MapSet.member?(visited, key), branch} do {false, %{"relationships" => rels} } -> {propertyList, newVisited} = Enum.flat_map_reduce rels, visitedAndMe, fn ({name, %{"data" => nil}}, accum) -> { [ {"#{name}_id", nil} ] , accum } ({name, %{"data" => linkages}}, accum) when is_list(linkages) -> follow_many(name, linkages, accum, included) ({name, %{"data" => identifier} }, accum) -> follow_one(name, identifier, accum, included) (_any, accum) -> {[], accum} end {Map.new(propertyList), newVisited} _ -> {%{}, visitedAndMe} end { Map.merge(params, branch["attributes"] || %{}) |> add_id_member(branch) , increasedVisits } end end
lib/ja_nested_params.ex
0.574156
0.451447
ja_nested_params.ex
starcoder
defmodule ElixirScript.FFI do @moduledoc """ The Foreign Function Interface (FFI) for interacting with JavaScript To define a foreign module, make a new module and add `use ElixirScript.FFI` to it. To define external functions, use the `defexternal` macro. Here is an example of a foreign module for a JSON module ```elixir defmodule MyApp.JSON do use ElixirScript.FFI defexternal stringify(map) defexternal parse(string) end ``` Foreign modules map to JavaScript files that export functions defined with the `defexternal` macro. ElixirScript expects JavaScript modules to be in the `priv/elixir_script` directory. These modules are copied to the output directory upon compilation. For our example, a JavaScript file must be placed in the `priv/elixir_script` folder. In our example, it could either be `priv/elixir_script/my_app/json.js` or `priv/elixir_script/my_app.json.js`. ElixirScript will look for either path It looks like this ```javascript export default { stringify: JSON.stringify, parse: JSON.parse } ``` `ElixirScript.FFI` takes the following options * `global`: If the module is defined in the global state or not. If this is set to `true`, nothing is imported and instead ElixirScript will use the name of the module to call a module and function in the global scope. * `name`: Only applicable with `global` is set to `true`. This will use the name defined here instead of the module name for calling modules and functions in the global scope An example using the global option to reference the JSON module in browsers ```elixir defmodule JSON do use ElixirScript.FFI, global: true defexternal stringify(map) defexternal parse(string) end ``` The calls above are translated to calls to the `JSON` module in the global scope An example using global and name options ```elixir defmodule Console do use ElixirScript.FFI, global: true, name: :console defexternal log(term) end ``` With the above, calls in ElixirScript to `Console.log` will translate to `console.log` in JavaScript """ defmacro __using__(opts) do quote do import ElixirScript.FFI Module.register_attribute(__MODULE__, :__foreign_info__, persist: true) @__foreign_info__ %{ path: Macro.underscore(__MODULE__), name: unquote(Keyword.get(opts, :name, nil)), global: unquote(Keyword.get(opts, :global, false)) } end end @doc """ Defines a JavaScript function to be called from Elixir modules To define an external function, pass the name and arguments to `defexternal` ```elixir defexternal my_js_function(arg1, arg2, arg3) ``` """ defmacro defexternal({name, _, args}) do args = Enum.map(args, fn {:\\, meta0, [{name, meta, atom}, value]} -> name = String.to_atom("_" <> Atom.to_string(name)) {:\\, meta0, [{name, meta, atom}, value]} {name, meta, atom} -> name = String.to_atom("_" <> Atom.to_string(name)) {name, meta, atom} other -> other end) quote do def unquote(name)(unquote_splicing(args)), do: nil end end end
lib/elixir_script/ffi.ex
0.878471
0.848847
ffi.ex
starcoder
defmodule ExClearbit.Model.Person do @moduledoc """ The struct for the Person data returned from Clearbit """ @derive [Poison.Encoder] defstruct [ :aboutme, :angellist, :avatar, :bio, :email_provider, :employment, :facebook, :fuzzy, :gender, :geo, :github, :googleplus, :gravatar, :id, :indexed_at, :linkedin, :location, :name, :site, :time_zone, :twitter, :utc_offset ] @type t :: %__MODULE__{} use ExConstructor end defmodule ExClearbit.Model.NameToDomain do @moduledoc """ The struct for the NameToDomain data returned from Clearbit """ @derive [Poison.Encoder] defstruct [ :domain, :logo, :name ] @type t :: %__MODULE__{} use ExConstructor end defmodule ExClearbit.Model.Company do @moduledoc """ The struct for the Company data returned from Clearbit """ @derive [Poison.Encoder] defstruct [ :angel_list, :category, :crunchbase, :description, :domain, :domain_aliases, :email_provider, :facebook, :founded_year, :geo, :id, :indexed_at, :legal_name, :linkedin, :location, :logo, :metrics, :name, :phone, :site, :tags, :tech, :time_zone, :twitter, :type, :utc_offset ] @type t :: %__MODULE__{} use ExConstructor end defmodule ExClearbit.Model.Prospector.Person do @moduledoc """ Struct for each Prospect person result from Clearbit """ @derive [Poison.Encoder] defstruct [ :id, :name, :title, :role, :sub_role, :seniority, :company, :email, :verified, :phone, ] @type t :: %__MODULE__{} use ExConstructor end defmodule ExClearbit.Model.Prospector.Results do @moduledoc """ Struct for containing list of prospect results from Clearbit """ @derive [Poison.Encoder] defstruct [ :page, :page_size, :total, :results, ] @type t :: %__MODULE__{ page: non_neg_integer, page_size: non_neg_integer, total: non_neg_integer, results: list(ExClearbit.Model.Prospector.Person.t) } use ExConstructor end defmodule ExClearbit.Model.Reveal do @moduledoc """ Struct for company retrieved by the Reveal API """ @derive [Poison.Encoder] defstruct [ :ip, :fuzzy, :domain, :type, :company, :geo_ip, ] @type t :: %__MODULE__{ ip: binary, fuzzy: boolean, domain: binary | nil, type: binary, geo_ip: map, company: ExClearbit.Model.Company.t | nil, } use ExConstructor end
lib/ex_clearbit/model.ex
0.685107
0.492798
model.ex
starcoder
defmodule ExCmd.Process do @moduledoc """ Server to interact with external process `ExCmd.stream!` should be preferred over this. Use this only if you need more control over the life-cycle of IO streams and OS process. """ defmodule Error do defexception [:message] end @default [log: false] @doc """ Starts a process using `cmd_with_args` and with options `opts` `cmd_with_args` must be a list containing command with arguments. example: `["cat", "file.txt"]`. ### Options * `cd` - the directory to run the command in * `env` - a list of tuples containing environment key-value. These can be accessed in the external program * `log` - When set to `true` odu logs and command stderr output are logged. Defaults to `false` """ @spec start_link(nonempty_list(String.t()), cd: String.t(), env: [{String.t(), String.t()}], log: boolean() ) :: {:ok, pid()} | {:error, any()} def start_link([cmd | args], opts \\ []) do opts = Keyword.merge(@default, opts) odu_path = odu_path() if !odu_path do raise Error, message: "'odu' executable not found" end cmd_path = :os.find_executable(to_charlist(cmd)) if !cmd_path do raise Error, message: "'#{cmd}' executable not found" end GenStateMachine.start_link(__MODULE__, %{ odu_path: odu_path, cmd_with_args: [to_string(cmd_path) | args], opts: opts }) end # client @doc """ Return bytes written by the program to output stream. This blocks until the programs write and flush the output """ @spec read(pid, non_neg_integer | :infinity) :: {:ok, iodata} | :eof | {:error, String.t()} | :closed def read(server, timeout \\ :infinity) do GenStateMachine.call(server, :read, timeout) end @doc """ Writes iodata `data` to programs input streams This blocks when the pipe is full """ @spec write(pid, iodata, non_neg_integer | :infinity) :: :ok | {:error, String.t()} | :closed def write(server, data, timeout \\ :infinity) do GenStateMachine.call(server, {:write, data}, timeout) end @doc """ Closes input stream. Which signal EOF to the program """ @spec close_stdin(pid) :: :ok | {:error, any()} def close_stdin(server), do: GenStateMachine.call(server, :close_stdin) @doc """ Kills the program """ def stop(server), do: GenStateMachine.stop(server, :normal) @doc """ Returns status of the process. It will be either of `:started`, `{:done, exit_status}` """ @spec status(pid) :: :started | {:done, integer()} def status(server), do: GenStateMachine.call(server, :status) @doc """ Returns os pid of the command """ @spec os_pid(pid) :: integer() def os_pid(server), do: GenStateMachine.call(server, :os_pid) @doc """ Waits for the program to terminate. If the program terminates before timeout, it returns `{:ok, exit_status}` else returns `:timeout` """ @spec await_exit(pid, timeout: timeout()) :: {:ok, integer()} | :timeout def await_exit(server, timeout \\ :infinity), do: GenStateMachine.call(server, {:await_exit, timeout}) @doc """ Returns [port_info](http://erlang.org/doc/man/erlang.html#port_info-1) """ def port_info(server), do: GenStateMachine.call(server, :port_info) ## server require Logger use GenStateMachine, callback_mode: :handle_event_function @doc false defmacro send_input, do: 1 @doc false defmacro send_output, do: 2 @doc false defmacro output, do: 3 @doc false defmacro input, do: 4 @doc false defmacro close_input, do: 5 @doc false defmacro output_eof, do: 6 @doc false defmacro command_env, do: 7 @doc false defmacro os_pid, do: 8 @doc false defmacro start_error, do: 9 # 4 byte length prefix + 1 byte tag @max_chunk_size 64 * 1024 - 5 def init(params) do actions = [{:next_event, :internal, :setup}] {:ok, :init, params, actions} end def handle_event(:internal, :setup, :init, params) do Process.flag(:trap_exit, true) odu_opts = Keyword.take(params.opts, [:log, :cd]) port = start_odu_port(params.odu_path, params.cmd_with_args, odu_opts) send_env(params.opts[:env], port) os_pid = receive do {^port, {:data, <<os_pid()::unsigned-integer-8, os_pid::big-unsigned-integer-32>>}} -> Logger.debug("Command started. os pid: #{os_pid}") os_pid {^port, {:data, <<start_error()::unsigned-integer-8, reason::binary>>}} -> Logger.error("Failed to start odu. reason: #{reason}") raise Error, message: "Failed to start odu" after 5_000 -> raise Error, message: "Failed to start command" end data = %{ pending_write: [], pending_read: [], input_ready: false, waiting_processes: MapSet.new(), port: port, os_pid: os_pid } {:next_state, :started, data, []} end def handle_event({:call, from}, {:await_exit, timeout}, state, data) do case state do {:done, exit_status} -> {:keep_state_and_data, [{:reply, from, {:ok, exit_status}}]} _ -> actions = [{{:timeout, {:await_exit, from}}, timeout, nil}] data = %{data | waiting_processes: MapSet.put(data.waiting_processes, from)} {:keep_state, data, actions} end end def handle_event({:call, from}, :status, state, _data) do {:keep_state_and_data, [{:reply, from, state}]} end def handle_event({:call, from}, :os_pid, _state, %{os_pid: os_pid}) do {:keep_state_and_data, [{:reply, from, os_pid}]} end def handle_event({:call, from}, :port_info, state, data) when state not in [:init, :setup] do {:keep_state_and_data, [{:reply, from, Port.info(data.port)}]} end def handle_event(:internal, :input_ready, _state, data) do {data, actions} = try_sending_input(data) {:keep_state, data, actions} end def handle_event({:call, from}, {:write, iodata}, :started, data) do bin = IO.iodata_to_binary(iodata) data = %{data | pending_write: data.pending_write ++ [{from, bin}]} {data, actions} = try_sending_input(data) {:keep_state, data, actions} end def handle_event({:call, from}, {:write, _iodata}, _state, _) do {:keep_state_and_data, [{:reply, from, {:error, :epipe}}]} end def handle_event({:call, from}, :read, state, data) when state in [:started, :input_closed] do {:keep_state, request_output(from, data), []} end def handle_event({:call, from}, :read, _state, _) do {:keep_state_and_data, [{:reply, from, :eof}]} end def handle_event({:call, from}, :close_stdin, :started, data) do {data, actions} = close_stream(:stdin, from, data) {data, write_actions} = handle_stdin_close(data) {:next_state, :input_closed, data, actions ++ write_actions} end def handle_event({:call, from}, :close_stdin, _, _data) do {:keep_state_and_data, [{:reply, from, :ok}]} end def handle_event(:info, {:EXIT, port, _reason}, state, %{port: port} = data) do {data, write_actions} = handle_stdin_close(data) {data, read_actions} = handle_eof(data) {data, await_exit_actions} = reply_await_exit(data, {:error, :stopped}) if state in [:started, :input_closed] do {:next_state, :port_closed, data, write_actions ++ read_actions ++ await_exit_actions} else {:keep_state, data, write_actions ++ read_actions ++ await_exit_actions} end end def handle_event(:info, {:EXIT, _, reason}, _, data) do {:stop_and_reply, reason, [], data} end def handle_event(:info, {port, {:exit_status, exit_status}}, _, %{port: port} = data) do Logger.debug("command exited with status: #{exit_status}") {data, write_actions} = handle_stdin_close(data) {data, read_actions} = handle_eof(data) {data, await_exit_actions} = reply_await_exit(data, {:ok, exit_status}) {:next_state, {:done, exit_status}, data, write_actions ++ read_actions ++ await_exit_actions} end def handle_event(:info, {port, {:data, output}}, _, %{port: port} = data) do <<tag::unsigned-integer-8, bin::binary>> = output {data, actions} = handle_command(tag, bin, data) {:keep_state, data, actions} end def handle_event({:timeout, {:await_exit, from}}, _, _, data) do {:keep_state, %{data | waiting_processes: MapSet.delete(data.waiting_processes, from)}, [{:reply, from, :timeout}]} end defp start_odu_port(odu_path, cmd_with_args, opts) do args = build_odu_params(opts) ++ ["--" | cmd_with_args] options = [:use_stdio, :exit_status, :binary, :hide, {:packet, 4}, args: args] Port.open({:spawn_executable, odu_path}, options) end @odu_protocol_version "1.0" defp build_odu_params(opts) do cd = Path.expand(opts[:cd] || File.cwd!()) if !File.exists?(cd) || !File.dir?(cd) do raise Error, message: ":cd is not a valid path" end params = ["-cd", cd, "-protocol_version", @odu_protocol_version] if opts[:log] do params ++ ["-log", "|2"] else params end end defp handle_command(output_eof(), <<>>, data) do handle_eof(data) end defp handle_command(output(), bin, %{pending_read: [pid | pending]} = data) do actions = [{:reply, pid, {:ok, bin}}] data = if Enum.empty?(pending) do %{data | pending_read: []} else send_command(send_output(), <<>>, data.port) %{data | pending_read: pending} end {data, actions} end defp handle_command(send_input(), <<>>, data) do data = %{data | input_ready: true} actions = [{:next_event, :internal, :input_ready}] {data, actions} end defp send_env(nil, port), do: send_env([], port) defp send_env(env, port) do payload = Enum.map(env, fn {key, value} -> entry = String.trim(key) <> "=" <> String.trim(value) if byte_size(entry) > 65536 do raise Error, message: "Env entry length exceeds limit" end <<byte_size(entry)::big-unsigned-integer-16, entry::binary>> end) |> Enum.join() send_command(command_env(), payload, port) end defp handle_stdin_close(data) do actions = Enum.flat_map(data.pending_write, fn {pid, _} -> [{:reply, pid, {:error, :epipe}}] end) {%{data | pending_write: []}, actions} end defp handle_eof(data) do actions = Enum.flat_map(data.pending_read, fn pid -> [{:reply, pid, :eof}] end) {%{data | pending_read: []}, actions} end defp reply_await_exit(data, response) do actions = Enum.flat_map(data.waiting_processes, fn pid -> [{:reply, pid, response}, {{:timeout, {:await_exit, pid}}, :infinity, nil}] end) {%{data | waiting_processes: MapSet.new()}, actions} end defp try_sending_input(%{pending_write: [{pid, bin} | pending], input_ready: true} = data) do {chunk, bin} = binary_split_at(bin, @max_chunk_size) send_command(input(), chunk, data.port) if bin == <<>> do actions = [{:reply, pid, :ok}] data = %{data | pending_write: pending, input_ready: false} {data, actions} else data = %{data | pending_write: [{pid, bin} | pending], input_ready: false} {data, []} end end defp try_sending_input(data) do {data, []} end defp request_output(from, %{pending_read: []} = data) do send_command(send_output(), <<>>, data.port) %{data | pending_read: [from]} end defp request_output(from, data) do %{data | pending_read: data.pending_read ++ [from]} end defp close_stream(:stdin, pid, data) do send_command(close_input(), <<>>, data.port) actions = [{:reply, pid, :ok}] {data, actions} end defp send_command(tag, bin, port) do bin = <<tag::unsigned-integer-8, bin::binary>> Port.command(port, bin) end defp binary_split_at(bin, pos) when byte_size(bin) <= pos, do: {bin, <<>>} defp binary_split_at(bin, pos) do len = byte_size(bin) {binary_part(bin, 0, pos), binary_part(bin, pos, len - pos)} end defp odu_path do Application.app_dir(:ex_cmd, "priv") |> Path.join(Mix.Tasks.Compile.Odu.executable_name()) end end
lib/ex_cmd/process.ex
0.800107
0.445107
process.ex
starcoder
defmodule MuonTrap.Daemon do use GenServer require Logger @moduledoc """ Wrap an OS process in a GenServer so that it can be supervised. For example, in your children list add MuonTrap.Daemon like this: ```elixir children = [ {MuonTrap.Daemon, ["my_server", ["--options", "foo")], [cd: "/some_directory"]]} ] opts = [strategy: :one_for_one, name: MyApplication.Supervisor] Supervisor.start_link(children, opts) ``` In the `child_spec` tuple, the second element is a list that corresponds to the `MuonTrap.cmd/3` parameters. I.e., The first item in the list is the program to run, the second is a list of commandline arguments, and the third is a list of options. The same options as `MuonTrap.cmd/3` are available with the following additions: * `:name` - Name the Daemon GenServer * `:log_output` - When set, send output from the command to the Logger. Specify the log level (e.g., `:debug`) * `:log_prefix` - Prefix each log message with this string (defaults to the program's path) * `:stderr_to_stdout` - When set to `true`, redirect stderr to stdout. Defaults to `false`. If you want to run multiple `MuonTrap.Daemon`s under one supervisor, they'll all need unique IDs. Use `Supervisor.child_spec/2` like this: ```elixir Supervisor.child_spec({MuonTrap.Daemon, ["my_server"), []]}, id: :server1) ``` """ defmodule State do @moduledoc false defstruct [:command, :port, :cgroup_path, :log_output, :log_prefix] end def child_spec([command, args]) do child_spec([command, args, []]) end def child_spec([command, args, opts]) do %{ id: __MODULE__, start: {__MODULE__, :start_link, [command, args, opts]}, type: :worker, restart: :permanent, shutdown: 500 } end @doc """ Start/link a deamon GenServer for the specified command. """ @spec start_link(binary(), [binary()], keyword()) :: GenServer.on_start() def start_link(command, args, opts \\ []) do {genserver_opts, opts} = case Keyword.pop(opts, :name) do {nil, _opts} -> {[], opts} {name, new_opts} -> {[name: name], new_opts} end GenServer.start_link(__MODULE__, [command, args, opts], genserver_opts) end @doc """ Get the value of the specified cgroup variable. """ @spec cgget(GenServer.server(), binary(), binary()) :: {:ok, String.t()} | {:error, File.posix()} def cgget(server, controller, variable_name) do GenServer.call(server, {:cgget, controller, variable_name}) end @doc """ Modify a cgroup variable. """ @spec cgset(GenServer.server(), binary(), binary(), binary()) :: :ok | {:error, File.posix()} def cgset(server, controller, variable_name, value) do GenServer.call(server, {:cgset, controller, variable_name, value}) end @doc """ Return the OS pid to the muontrap executable. """ @spec os_pid(GenServer.server()) :: non_neg_integer() def os_pid(server) do GenServer.call(server, :os_pid) end @impl true def init([command, args, opts]) do options = MuonTrap.Options.validate(:daemon, command, args, opts) port_options = MuonTrap.Port.port_options(options) ++ [{:line, 256}] port = Port.open({:spawn_executable, to_charlist(MuonTrap.muontrap_path())}, port_options) {:ok, %State{ command: command, port: port, cgroup_path: Map.get(options, :cgroup_path), log_output: Map.get(options, :log_output), log_prefix: Map.get(options, :log_prefix, command <> ": ") }} end alias MuonTrap.Cgroups @impl true def handle_call({:cgget, controller, variable_name}, _from, %{cgroup_path: cgroup_path} = state) do result = Cgroups.cgget(controller, cgroup_path, variable_name) {:reply, result, state} end @impl true def handle_call( {:cgset, controller, variable_name, value}, _from, %{cgroup_path: cgroup_path} = state ) do result = Cgroups.cgset(controller, cgroup_path, variable_name, value) {:reply, result, state} end @impl true def handle_call(:os_pid, _from, state) do {:os_pid, os_pid} = Port.info(state.port, :os_pid) {:reply, os_pid, state} end @impl true def handle_info({_port, {:data, _}}, %State{log_output: nil} = state) do # Ignore output {:noreply, state} end @impl true def handle_info( {port, {:data, {_, message}}}, %State{port: port, log_output: log_level, log_prefix: prefix} = state ) do _ = Logger.log(log_level, [prefix, message]) {:noreply, state} end @impl true def handle_info({port, {:exit_status, status}}, %State{port: port} = state) do reason = case status do 0 -> _ = Logger.info("#{state.command}: Process exited successfully") :normal _failure -> _ = Logger.error("#{state.command}: Process exited with status #{status}") :error_exit_status end {:stop, reason, state} end end
lib/muontrap/daemon.ex
0.829975
0.748536
daemon.ex
starcoder
defmodule Stream.Reducers do # Collection of reducers and utilities shared by Enum and Stream. @moduledoc false def chunk_every(chunk_by, enumerable, count, step, leftover) do limit = :erlang.max(count, step) chunk_fun = fn entry, {acc_buffer, acc_count} -> acc_buffer = [entry | acc_buffer] acc_count = acc_count + 1 new_state = if acc_count >= limit do remaining = acc_count - step {Enum.take(acc_buffer, remaining), remaining} else {acc_buffer, acc_count} end if acc_count == count do {:cont, :lists.reverse(acc_buffer), new_state} else {:cont, new_state} end end after_fun = fn {acc_buffer, acc_count} -> if leftover == :discard or acc_count == 0 or acc_count >= count do {:cont, []} else {:cont, :lists.reverse(acc_buffer, Enum.take(leftover, count - acc_count)), []} end end chunk_by.(enumerable, {[], 0}, chunk_fun, after_fun) end def chunk_by(chunk_by, enumerable, fun) do chunk_fun = fn entry, nil -> {:cont, {[entry], fun.(entry)}} entry, {acc, value} -> case fun.(entry) do ^value -> {:cont, {[entry | acc], value}} new_value -> {:cont, :lists.reverse(acc), {[entry], new_value}} end end after_fun = fn nil -> {:cont, :done} {acc, _value} -> {:cont, :lists.reverse(acc), :done} end chunk_by.(enumerable, nil, chunk_fun, after_fun) end defmacro dedup(callback, fun \\ nil) do quote do fn entry, acc(head, prev, tail) = acc -> value = unquote(callback).(entry) case prev do {:value, ^value} -> skip(acc) _ -> next_with_acc(unquote(fun), entry, head, {:value, value}, tail) end end end end defmacro drop(fun \\ nil) do quote do fn _entry, acc(head, amount, tail) when amount > 0 -> skip(acc(head, amount - 1, tail)) entry, acc(head, amount, tail) -> next_with_acc(unquote(fun), entry, head, amount, tail) end end end defmacro drop_every(nth, fun \\ nil) do quote do fn entry, acc(head, curr, tail) when curr in [unquote(nth), :first] -> skip(acc(head, 1, tail)) entry, acc(head, curr, tail) -> next_with_acc(unquote(fun), entry, head, curr + 1, tail) end end end defmacro drop_while(callback, fun \\ nil) do quote do fn entry, acc(head, bool, tail) = original -> if bool and unquote(callback).(entry) do skip(original) else next_with_acc(unquote(fun), entry, head, false, tail) end end end end defmacro filter(callback, fun \\ nil) do quote do fn entry, acc -> if unquote(callback).(entry) do next(unquote(fun), entry, acc) else skip(acc) end end end end defmacro filter_map(filter, mapper, fun \\ nil) do quote do fn entry, acc -> if unquote(filter).(entry) do next(unquote(fun), unquote(mapper).(entry), acc) else skip(acc) end end end end defmacro map(callback, fun \\ nil) do quote do fn entry, acc -> next(unquote(fun), unquote(callback).(entry), acc) end end end defmacro map_every(nth, mapper, fun \\ nil) do quote do fn entry, acc(head, curr, tail) when curr in [unquote(nth), :first] -> next_with_acc(unquote(fun), unquote(mapper).(entry), head, 1, tail) entry, acc(head, curr, tail) -> next_with_acc(unquote(fun), entry, head, curr + 1, tail) end end end defmacro reject(callback, fun \\ nil) do quote do fn entry, acc -> unless unquote(callback).(entry) do next(unquote(fun), entry, acc) else skip(acc) end end end end defmacro scan2(callback, fun \\ nil) do quote do fn entry, acc(head, :first, tail) -> next_with_acc(unquote(fun), entry, head, {:ok, entry}, tail) entry, acc(head, {:ok, acc}, tail) -> value = unquote(callback).(entry, acc) next_with_acc(unquote(fun), value, head, {:ok, value}, tail) end end end defmacro scan3(callback, fun \\ nil) do quote do fn entry, acc(head, acc, tail) -> value = unquote(callback).(entry, acc) next_with_acc(unquote(fun), value, head, value, tail) end end end defmacro take(fun \\ nil) do quote do fn entry, acc(head, curr, tail) = original -> case curr do 0 -> {:halt, original} 1 -> {_, acc} = next_with_acc(unquote(fun), entry, head, 0, tail) {:halt, acc} _ -> next_with_acc(unquote(fun), entry, head, curr - 1, tail) end end end end defmacro take_every(nth, fun \\ nil) do quote do fn entry, acc(head, curr, tail) when curr in [unquote(nth), :first] -> next_with_acc(unquote(fun), entry, head, 1, tail) entry, acc(head, curr, tail) -> skip(acc(head, curr + 1, tail)) end end end defmacro take_while(callback, fun \\ nil) do quote do fn entry, acc -> if unquote(callback).(entry) do next(unquote(fun), entry, acc) else {:halt, acc} end end end end defmacro uniq_by(callback, fun \\ nil) do quote do fn entry, acc(head, prev, tail) = original -> value = unquote(callback).(entry) if Map.has_key?(prev, value) do skip(original) else next_with_acc(unquote(fun), entry, head, Map.put(prev, value, true), tail) end end end end defmacro with_index(fun \\ nil) do quote do fn entry, acc(head, counter, tail) -> next_with_acc(unquote(fun), {entry, counter}, head, counter + 1, tail) end end end end
lib/elixir/lib/stream/reducers.ex
0.575349
0.407628
reducers.ex
starcoder
defmodule MapArray do @moduledoc """ Documentation for MapArray. """ defguard is_index(i) when is_integer(i) and i >= 0 @type index :: non_neg_integer() @type t :: %{non_neg_integer() => any()} @spec append(t(), any) :: t() def append(map, item) do Map.put(map, map_size(map), item) end @doc """ This is an expensive operation as it forces reconstruction of the entire map. """ @spec prepend(t(), any) :: t() def prepend(map, item) do map |> Map.new(fn {i, v} -> {i + 1, v} end) |> Map.put(0, item) end @spec new(Enumerable.t()) :: t() def new(enumerable) do Enum.reduce(enumerable, %{}, fn item, acc -> append(acc, item) end) end @spec seek_index_up(map(), nil | non_neg_integer, (any -> boolean())) :: {:ok, index()} | :error def seek_index_up(map, start \\ 0, finder) when is_index(start) do start |> iter_up() |> fetch_index(map, finder) end @spec seek_index_down(map(), nil | non_neg_integer, (any -> boolean())) :: {:ok, index()} | :error def seek_index_down(map, start \\ nil, finder) when is_index(start) or is_nil(start) do start = start || len(map) - 1 start |> iter_down() |> fetch_index(map, finder) end @spec seek_down(map(), nil | non_neg_integer(), (any -> boolean())) :: {:ok, any} | :error def seek_down(map, start \\ nil, finder) do case seek_index_down(map, start, finder) do {:ok, i} -> Map.fetch(map, i) :error -> :error end end @spec seek_up(map(), non_neg_integer(), (any -> boolean())) :: {:ok, any} | :error def seek_up(map, start \\ 0, finder) do case seek_index_up(map, start, finder) do {:ok, i} -> Map.fetch(map, i) :error -> :error end end @spec len(map) :: non_neg_integer() def len(map), do: map_size(map) @spec max_index(t()) :: non_neg_integer() def max_index(map), do: map_size(map) - 1 @type reducer2 :: (any, any -> any) @type reducer3 :: (any, any, index -> any) @spec reduce(t(), any, reducer2 | reducer3) :: any def reduce(map, initial_state, reducer) when is_function(reducer) do 0 |> iter_up() |> do_reduce(map, initial_state, reducer) end @spec reverse_reduce(t(), any, reducer2 | reducer3) :: any def reverse_reduce(map, initial_state, reducer) when is_function(reducer) do map |> max_index() |> iter_down() |> do_reduce(map, initial_state, reducer) end defp do_reduce(iter, map, initial_state, reducer) do Enum.reduce_while(iter, initial_state, fn i, acc -> case Map.fetch(map, i) do {:ok, value} -> {:cont, apply_reducer(value, i, acc, reducer)} :error -> {:halt, acc} end end) end defp apply_reducer(value, i, acc, reducer) do cond do is_function(reducer, 2) -> reducer.(value, acc) is_function(reducer, 3) -> reducer.(value, acc, i) end end @type mapper1 :: (any -> any) @type mapper2 :: (any, index -> any) @spec map(map, mapper1 | mapper2) :: [any] def map(map, mapper) when is_map(map) and is_function(mapper) do 0 |> iter_up() |> do_map(map, mapper) end @spec reverse_map(map, (any -> any)) :: [any] def reverse_map(map, mapper) when is_map(map) and is_function(mapper) do map |> max_index() |> iter_down() |> do_map(map, mapper) end defp do_map(iter, map, mapper) do iter |> Stream.map(fn i -> apply_mapper(map, i, mapper) end) |> Enum.take(map_size(map)) end defp apply_mapper(map, i, mapper) do value = Map.fetch!(map, i) cond do is_function(mapper, 1) -> mapper.(value) is_function(mapper, 2) -> mapper.(value, i) end end @spec slice(any, Range.t()) :: [any] def slice(map, a..b) do a..b |> Stream.map(fn i -> Map.fetch(map, i) end) |> Stream.filter(fn item -> match?({:ok, _}, item) end) |> Stream.map(fn {:ok, value} -> value end) |> Enum.into([]) end @spec to_list(map) :: [any] def to_list(map) when is_map(map) do map(map, fn x -> x end) end @spec to_reversed_list(map) :: [any] def to_reversed_list(map) do reverse_map(map, fn x -> x end) end defp iter_up(start) when is_index(start) do Stream.iterate(start, fn i -> i + 1 end) end defp iter_down(start) when is_integer(start) and start >= 0 do Stream.iterate(start, fn i -> i - 1 end) end defp fetch_index(iter, map, matcher) do iter |> Enum.reduce_while(nil, fn i, _ -> do_get_index(map, i, matcher) end) |> case do nil -> :error i when is_index(i) -> {:ok, i} end end defp do_get_index(map, i, matcher) do with( {:ok, item} <- Map.fetch(map, i), {:match?, true} <- {:match?, item |> matcher.() |> to_boolean()} ) do {:halt, i} else :error -> {:halt, nil} {:match?, false} -> {:cont, nil} end end defp to_boolean(nil), do: false defp to_boolean(false), do: false defp to_boolean(_), do: true end
lib/map_array.ex
0.776369
0.501709
map_array.ex
starcoder
defmodule Sanbase.EventBus do @moduledoc """ The Event Bus is a mechanism that allows different components to communicate with each other without knowing about eachother. The Event Bus allows to decouple the creation of an event from the processing of that same event. A component sends events to the Event Bus without knowing what component will process these event or how many components will process the event. The components processing the events do not know and do not need to know who emitted the event. The event bus implementation lends itself into separating into two main component types - emitter and subscriber. The emitter sends event to topics and the subscriber listens on some or all topics and processes the messages. An emitter is every module that invokes the Sanbase.EventBus.notify/1 function. In order to emit an event, all a module needs is to know a valid topic name and valid event structure. The valid event structure are those that are recognized by the Sanbase.EventValidation.valid?/1 function. The subscribers should subscribe to a list of topics by invoking the EventBus.subscribe.subscribe/1 function like this: EventBus.subscribe({Sanbase.EventBus.KafkaExporterSubscriber, [".*"]}). The subscribers should implement a process/1 function that accepts an event_shadow and processes the event. Most often the subscriber is a GenServer and the process/1 function just casts the event shadow so it is processed asynchronously. """ use EventBus.EventSource require Application defmodule InvalidEventError do defexception [:message] end @topics [ :alert_events, :billing_events, :comment_topic, :insight_events, :invalid_events, :user_events, :watchlist_events ] @subscribers [ __MODULE__.KafkaExporterSubscriber, __MODULE__.UserEventsSubscriber, __MODULE__.BillingEventSubscriber ] def children(), do: @subscribers def init() do for topic <- @topics, do: EventBus.register_topic(topic) for subscriber <- @subscribers, do: EventBus.subscribe({subscriber, subscriber.topics()}) end def notify(params) do # In case the event is not valid, in prod this will rewrite the params so # the event is emitted in a special invalid_events topic. In dev/test the # behavior is to raise so errors are catched straight away. Invalid events # should not be emitted at all but they can slip in without good testing # and in this case prod should not break params = case Sanbase.EventBus.EventValidation.valid?(params.data) do true -> params false -> handle_invalid_event(params) end params = params |> Map.merge(%{ id: Map.get(params, :id, Ecto.UUID.generate()), topic: Map.fetch!(params, :topic), transaction_id: Map.get(params, :transaction_id), error_topic: Map.fetch!(params, :topic) }) EventSource.notify params do Map.fetch!(params, :data) end end case Application.compile_env(:sanbase, :env) do :prod -> defp handle_invalid_event(params) do # Replace the topic with the invalid events topic so the other topics # always contain valid events. Also put the original topic in the data params |> put_in([:data, :original_topic], params.topic) |> Map.put(:topic, :invalid_events) end _ -> defp handle_invalid_event(params) do raise( InvalidEventError, message: "Invalid event submitted: #{inspect(params)}" ) end end end
lib/sanbase/event_bus/event_bus.ex
0.732879
0.637757
event_bus.ex
starcoder
defmodule Float do @moduledoc """ Functions for working with floating point numbers. """ @doc """ Parses a binary into a float. If successful, returns a tuple of the form `{ float, remainder_of_binary }`. Otherwise `:error`. ## Examples iex> Float.parse("34") {34.0,""} iex> Float.parse("34.25") {34.25,""} iex> Float.parse("56.5xyz") {56.5,"xyz"} iex> Float.parse("pi") :error """ @spec parse(binary) :: { float, binary } | :error def parse(binary) when is_binary(binary) do case Integer.parse binary do :error -> :error { integer_part, after_integer } -> parse after_integer, integer_part end end # Dot followed by digit is required afterwards or we are done defp parse(<< ?., char, rest :: binary >>, int) when char in ?0..?9 do parse(rest, char - ?0, 1, int) end defp parse(rest, int) do { :erlang.float(int), rest } end # Handle decimal points defp parse(<< char, rest :: binary >>, float, decimal, int) when char in ?0..?9 do parse rest, 10 * float + (char - ?0), decimal + 1, int end defp parse(<< ?e, after_e :: binary >>, float, decimal, int) do case Integer.parse after_e do :error -> # Note we rebuild the binary here instead of breaking it apart at # the function clause because the current approach copies a binary # just on this branch. If we broke it apart in the function clause, # the copy would happen when calling Integer.parse/1. { floatify(int, float, decimal), << ?e, after_e :: binary >> } { exponential, after_exponential } -> { floatify(int, float, decimal, exponential), after_exponential } end end defp parse(bitstring, float, decimal, int) do { floatify(int, float, decimal), bitstring } end defp floatify(int, float, decimal, exponential // 0) do multiplier = if int < 0, do: -1.0, else: 1.0 # Try to ensure the minimum amount of rounding errors result = multiplier * (abs(int) * :math.pow(10, decimal) + float) * :math.pow(10, exponential - decimal) # Try avoiding stuff like this: # iex(1)> 0.0001 * 75 # 0.007500000000000001 # Due to IEEE 754 floating point standard # http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html final_decimal_places = decimal - exponential if final_decimal_places > 0 do decimal_power_round = :math.pow(10, final_decimal_places) trunc(result * decimal_power_round) / decimal_power_round else result end end end
lib/elixir/lib/float.ex
0.931088
0.468061
float.ex
starcoder
defmodule AdventOfCode.Day07 do @moduledoc ~S""" [Advent Of Code day 7](https://adventofcode.com/2018/day/7). iex> input = [ ...> "Step C must be finished before step A can begin.", ...> "Step C must be finished before step F can begin.", ...> "Step A must be finished before step B can begin.", ...> "Step A must be finished before step D can begin.", ...> "Step B must be finished before step E can begin.", ...> "Step D must be finished before step E can begin.", ...> "Step F must be finished before step E can begin."] iex> AdventOfCode.Day07.solve("1", input) "CABDFE" iex>AdventOfCode.Day07.solve("2", input) 15 """ defmodule Workers do @number_of_workers if Mix.env() == :test, do: 2, else: 5 @base_time if Mix.env() == :test, do: 0, else: 60 @task_durations_map ?A..?Z |> Enum.with_index(@base_time + 1) |> Enum.into(%{}, fn {char, i} -> {to_string([char]), i} end) def new do Enum.map(1..@number_of_workers, fn _ -> {:idle, 0} end) end def count_idle(workers), do: Enum.count(workers, fn {task, _} -> task == :idle end) # expects no idle workers def finish_shortest_task(workers) do {task, min_time} = Enum.filter(workers, fn {task, _} -> task != :idle end) |> Enum.min_by(fn {_, time} -> time end) updated_workers = Enum.map(workers, fn {task, time} -> case time - min_time do 0 -> {:idle, 0} time -> {task, time} end end) {task, min_time, updated_workers} end def finish_all(workers) do {_task, max_time} = Enum.max_by(workers, fn {_, time} -> time end) {max_time, new()} end def assign_tasks(workers, tasks) do Enum.map_reduce(workers, tasks, &do_assign_tasks/2) |> elem(0) end defp do_assign_tasks(worker, []), do: {worker, []} defp do_assign_tasks({:idle, _}, [task | rest]), do: {{task, time_to_complete(task)}, rest} defp do_assign_tasks(worker, tasks), do: {worker, tasks} defp time_to_complete(task), do: @task_durations_map[task] end def solve("1", input) do requirements = input_into_requirements_map(input) {reversed_sequence, _} = Enum.reduce(1..Enum.count(requirements), {[], requirements}, fn _, {acc, requirements} -> {next_step, _} = Enum.min_by(requirements, fn {step, preq_list} -> {Enum.count(preq_list), step} end) requirements = Map.delete(requirements, next_step) |> Enum.into(%{}, fn {step, preq_list} -> {step, List.delete(preq_list, next_step)} end) {[next_step | acc], requirements} end) reversed_sequence |> Enum.reverse() |> Enum.join() end def solve("2", input) do requirements = input_into_requirements_map(input) {time, _, %{}} = Enum.reduce_while(Stream.cycle([nil]), {0, Workers.new(), requirements}, fn _, {time_acc, workers, requirements} -> tasks = available_tasks(requirements) idle_workers = Workers.count_idle(workers) {time, workers, requirements} = if tasks == [] || idle_workers == 0 do {task, time_spent, workers} = Workers.finish_shortest_task(workers) {time_acc + time_spent, workers, drop_dependency_from_tasks(requirements, [task])} else tasks_to_assign = Enum.take(tasks, idle_workers) workers = Workers.assign_tasks(workers, tasks_to_assign) {time_acc, workers, delete_tasks(requirements, tasks_to_assign)} end case Enum.count(requirements) do 0 -> {time_spent, workers} = Workers.finish_all(workers) {:halt, {time + time_spent, workers, %{}}} _ -> {:cont, {time, workers, requirements}} end end) time end defp available_tasks(requirements) do requirements |> Enum.filter(fn {_, req} -> req == [] end) |> Enum.sort_by(fn {task, _} -> task end) |> Enum.map(fn {task, _} -> task end) end defp delete_tasks(requirements, tasks) when is_list(tasks) do Enum.filter(requirements, fn {task, _} -> task not in tasks end) |> Enum.into(%{}) end defp drop_dependency_from_tasks(requirements, tasks_to_delete) when is_list(tasks_to_delete) do Enum.reduce(requirements, %{}, fn {task, blockers}, acc -> if task in tasks_to_delete do Map.put(acc, task, []) else case blockers -- tasks_to_delete do [] -> Map.put(acc, task, []) rest -> Map.put(acc, task, rest) end end end) end defp input_into_requirements_map(input) when is_binary(input) do input |> String.split("\n") |> input_into_requirements_map() end defp input_into_requirements_map(input) do Enum.reduce(input, %{}, fn line, acc -> <<"Step ", a::binary-size(1), " must be finished before step ", b::binary-size(1), " can begin.">> = line acc |> Map.update(b, [a], &[a | &1]) |> Map.put_new(a, []) end) end end
lib/advent_of_code/day_07.ex
0.566139
0.622947
day_07.ex
starcoder
defmodule Engine.DB.Transaction.PaymentV1.Validator.Amount do @moduledoc """ Contains validation logic for amounts, see validate/3 for more details. """ alias Engine.DB.Transaction.PaymentV1.Type alias Engine.Fee.FeeClaim @type validation_result_t() :: :ok | {:error, {:inputs, :amounts_do_not_add_up}} | {:error, {:inputs, :fees_not_covered}} | {:error, {:inputs, :fee_token_not_accepted}} | {:error, {:inputs, :overpaying_fees}} @doc """ Validates that the amount per token given in the inputs and outputs is correct. The following (per token) should be true: input_amount - output_amount - fees = 0 However fees can only be paid in 1 token per transaction. So if we have multiple tokens in a transaction, only one pay the fees and for the rest input_amount - output_amount must be 0. The logic here is: - We group inputs and outputs by token - We substract output amounts from input amounts per token - We remove tokens that have a 0 amount from the result - We ensure that amounts are positive - Only token/amount left should be the one that will pay the fee - If no token/amount left then it must be a merge or an error - We finally match the amount with the given fees Returns - `:ok` if the amounts are valid, or returns: - `{:error, {:inputs, :amounts_do_not_add_up}}` if output amounts are greater than input amounts - `{:error, {:inputs, :fees_not_covered}}` if fees are not covered by inputs - `{:error, {:inputs, :fee_token_not_accepted}}` if the given fee token is not supported - `{:error, {:inputs, :overpaying_fees}}` if fees are being overpaid. ## Example: iex> Engine.DB.Transaction.PaymentV1.Validator.Amount.validate( ...> %{<<1::160>> => [1, 3]}, ...> %{<<1::160>> => 1}) :ok """ @spec validate(Type.optional_accepted_fees_t(), FeeClaim.paid_fees_t()) :: validation_result_t() def validate(fees, paid_fees_by_currency) do with :ok <- positive_amounts(paid_fees_by_currency), :ok <- no_fees_required(paid_fees_by_currency, fees), :ok <- fees_covered(paid_fees_by_currency, fees) do :ok else :no_fees_required -> :ok error -> error end end defp positive_amounts(amounts) do case Enum.all?(amounts, &(elem(&1, 1) > 0)) do true -> :ok false -> {:error, {:inputs, :amounts_do_not_add_up}} end end # No fees required for merge transactions defp no_fees_required(amounts, :no_fees_required) when map_size(amounts) == 0, do: :no_fees_required defp no_fees_required(_, :no_fees_required), do: {:error, {:inputs, :overpaying_fees}} defp no_fees_required(_, _), do: :ok # If it's not a merge transaction, we should have at least one token to cover the fees defp fees_covered(amounts, _fees) when map_size(amounts) == 0 do {:error, {:inputs, :fees_not_covered}} end # We can't have more than 1 token paying for the fees defp fees_covered(amounts, _fees) when map_size(amounts) > 1 do {:error, {:inputs, :amounts_do_not_add_up}} end # In this case, we know that we have only one %{token => amount} defp fees_covered(amounts, fees) do fee_token = amounts |> Map.keys() |> hd() fee_paid = amounts[fee_token] case Map.get(fees, fee_token) do # Paying fees with an unsupported token nil -> {:error, {:inputs, :fee_token_not_accepted}} accepted_fee_amounts -> exact_fee_amount(fee_paid, accepted_fee_amounts) end end # The current_amount here is the latest accepted fee amount for this token. # We may have a buffer period during when we support previous fee amounts to avoid failure # of transactions that was created right before a fee update. # If the fee_paid is in the list of supported fee we are good, if not we return an error # based on the latest supported amount. defp exact_fee_amount(fee_paid, [current_amount | _] = accepted_fee_amounts) do cond do fee_paid in accepted_fee_amounts -> :ok current_amount > fee_paid -> {:error, {:inputs, :fees_not_covered}} current_amount < fee_paid -> {:error, {:inputs, :overpaying_fees}} end end end
apps/engine/lib/engine/db/transaction/payment_v1/validators/amount.ex
0.890972
0.608885
amount.ex
starcoder
defmodule Finance.CashFlow do @moduledoc """ An annuity of n regular payments or receipts occurring at evenly spaced periods can be represented by the cash flow: ``` [c0, c1, c2, c3, ....., cn] ``` Where the outgoings are represented by negative values, and income by positive values. Calculates the net present value of a cash flow which is represented by a list of values. Its assumed that the time period between values is constant, e.g. monthly, weekly etc. ``` c1 c2 cn c0 + ----- + ------- + .... + -------- = 0 1 + i (1+i)^2 (1+i)^n ``` ## Example From UK OFT document OFT144.pdf A borrower is advanced £7,500 on 15 August 2000, to be repaid over 48 months by equal monthly instalments. The first instalment is to be paid on 15 November 2000 and the lender requires a £25 administration fee to be paid at the same time. Interest will be charged monthly on the outstanding balance at one-twelfth of the lender’s variable annual base rate plus 4%. The base rate is 9.5% at the time the agreement is made. Although the lenders rate is variable we assume for now that its fixed for the duration of this loan at 4%. Annual interest rate = 9.5% + 4% = 13.5%. So the monthly interest is charged is: ``` i = 13.5% / 12 = 1.125% ``` No payments are made for the first two month but interest is charged on the advance giving a future value: 7500*(1+0.01125)^2 = 7669.69921875 iex> Finance.Simple.fv(7500, 0, 0.01125, 2) -7669.69921875 This value can be used as an adjusted advance for a 48 month payment period, in which we ignore the one-off admin fee. ``` P P P -7669.69921875 + --------- + ------------- + .... + -------------- = 0 1+0.01125 (1+0.01125)^2 (1+0.01125)^48 ``` iex> Finance.Simple.pmt(-7669.69921875, 0.01125, 48) |> Float.round(2) 207.67 The customer will pay £207.67 for 48 months, however as this has been rounded up to two decimal places, the customer will end up paying back slightly to much. The difference can be determined by calulating the future value of the loan with a payment of £207.67, which if correct would give a value of zero. iex> Finance.Simple.fv(-7669.69921875, 207.67, 0.01125, 48) |> Float.round(2) -0.17 So the final payment will be need to be adjusted by 17p i.e. 207.67 - 0.17 = £207.50 to compensate. Now we have payments, a cash flow can be constructed ``` c0 = -£7500 advance c1 = c2 = 0.0 2 months deferred payment c3 = £207.67 + £25 payment + admin fee c4 = c5 = .... = c49 = £207.67 46 payments c50 = £207.50 final payment Which can be solved to obtain the internal rate of return (irr) ``` iex> c = List.flatten([[-7500, 0, 0, 232.67], List.duplicate(207.67, 46), 207.50]) iex> {:ok, root} = Finance.CashFlow.irr(c) iex> Float.round(root, 12) 0.011384044595 Finally given that the time periods of the payments is monthly, the APR can be determined. iex> Finance.Rate.irr2apr(0.011384044595, Finance.Period.monthly) |> Float.round(1) 14.5 """ alias Finance.Numerical @doc """ Net Present Value of an arbitrary cash flow ## Example From http://www.financeformulas.net/Net_Present_Value.html | Year | Cash Flow | Present Value | 0 | -£500,000 | -£500,000 | 1 | £200,000 | £181,818.18 | 2 | £300,000 | £247,933.88 | 3 | £200,000 | £150,262.96 Net Present Value = £80,015.03 @ 10% iex> Finance.CashFlow.npv([-500000, 200000, 300000, 200000], 0.1) |> Float.round(2) 80015.03 """ def npv(c, irr) do f = 1.0 / (1.0 + irr) {npv, _} = Enum.reduce(c, {0.0, 1.0}, fn x, {s, fm} -> {s + fm * x, fm * f} end) npv end @doc """ First Derivative of the Net Present Value ``` v1 2 * v2 dpv = - ----------- - ----------- - ..... (1 + irr)^2 (1 + irr)^3 ``` ## Example iex> Finance.CashFlow.dnpv([-500000, 200000, 300000, 200000], 0.1) |> Float.round(2) -1025886.21 """ def dnpv(c, irr) do f = 1.0 / (1.0 + irr) {dnpv, _, _} = Enum.reduce(c, {0.0, 0.0, f}, fn x, {s, i, fm} -> {s - i * fm * x, i + 1.0, fm * f} end) dnpv end @doc """ Internal Rate of Return IRR """ @default_irr_guess 0.1 def irr(c, guess \\ @default_irr_guess) do npv = fn c -> fn i -> npv(c, i) end end dnpv = fn c -> fn i -> dnpv(c, i) end end Numerical.solve(npv.(c), dnpv.(c), guess) end end
lib/finance/cashflow.ex
0.882181
0.961461
cashflow.ex
starcoder
defmodule Bargad do @moduledoc """ ### Overview Bargad is a service which implements the concepts and data strucutures described in the **Certificate Transparency** whitepaper [RFC6962](https://tools.ietf.org/html/rfc6962) and the [**Compact Sparse Merkle tree**](https://osf.io/8mcnh/download) whitepaper. The data structures mentioned above are implemented through a **Merkle tree** which provides all the crytographic guarantees for the data. We provide a **storage layer** for this Merkle tree which allows us to scale it for extremely large sets of data. This storage layer is flexible to accomodate many types of backends. The Bargad service can operate in two modes - **Log Mode** - A verifiable append only log which is filled from left to right and provides the proof for **inclusion** and **consistency** of data. - **Map Mode** - A verifiable map which allows for the storage and retrieval of key value pairs and provides a cryptographich proof for the **inclusion** of this data. ### Features The Bargad Service as a whole supports the features listed below - Support for **mutiple backends** for persistence. - Multi-tenanted i.e it supports **multiple tree** heads. - Support for **multiple hashing algorithms**. - Uses **Protocol Buffers** for efficient serialization and deserialization of data. - Very **resilient**, recovers from crashes. Utilizes Erlang OTP constructs. Features specific to different modes are given below #### Verifiable Log - Implemented as a **dense** merkle tree, filled from left to right. - Supports generation of **consistency proofs** for the log. - Supports **verification** of the generated consistency proofs. - Supports generation of **inclusion proofs** for the log. - Supports **verfication** of the generated inclusion proof. #### Verifiable Map - Implemented as a **sparse** merkle tree with support for storing very large amounts of data. - If **SHA256** is used has the hashing algorithm for the underlying merkle tree, the map can support upto **2^256 keys**. - Supports generation of **audit/inclusion proofs** for the map. - Supports **verfication** of the generated inclusion proof. ### Comparison | | Bargad | [Trillian](https://github.com/google/trillian) | [Merkle Patricia Tree](https://github.com/exthereum/merkle_patricia_tree) | [Merkle Tree(https://github.com/yosriady/merkle_tree) | |---------------------------------------|--------|----------|----------------------|-------------| | Persistence | Yes | Yes | Yes | No | | Multiple Backends | Yes | Yes | Yes | No | | Multiple Trees | Yes | Yes | No | No | | Protocol Buffers | Yes | Yes | No | No | | Verifiable Log | Yes | Yes | Yes* | Yes* | | Verifiable Map | Yes | Yes | No | No | | Consistency Proof for Log | Yes | Yes | No | No | | Inclusion Proof for Log | Yes | Yes | Yes | Yes | | Inclusion/Non-Inclusion Proof for Map | Yes | Yes | No | No | | Filters/Personalities | No | Yes | No | No | | Batch writes | No | Yes | No | No | | Second Preimage attack prevention | Yes | Yes | No | No | ### Roadmap - Add Filters - Add signature to tree nodes - Support for batch writes - Support for LevelDB and PostgreSQL - Support synchronization of two trees - Provide snapshots of Map ### Using Bargad #### Installation Bargad is developed as an Elixir application, and is published to Hex, Elixir's package manager. The package can be installed to your mix project by adding `bargad` to the list of dependencies and applications in `mix.exs`: ```elixir defp deps do [ {:bargad, "~> 1.0.0"} ] end ``` ```elixir def application do [ extra_applications : [ :bargad, ....] ] end ``` And run: ```bash $ mix deps.get ``` The docs can be found at [https://hexdocs.pm/bargad](https://hexdocs.pm/bargad). #### Usage Bargad includes an integration test suite which covers most of the features Bargad service provides. Nevertheless here is a basic usage of Bargad in Verifiable Log mode. ```elixir ## Bargad in Verifiable Log mode ## Note that here we are directly using Bargad.Log module for simplicity, ## it is recommended to use the Superwised LogClient and MapClient. iex> tree = ...> Bargad.Log.new("FRZ", :sha256, [{"module", "ETSBackend"}]) |> ...> Bargad.Log.insert("3") |> ...> Bargad.Log.insert("7") iex> audit_proof = Bargad.Log.audit_proof(tree, 1) %{ hash: <<63, 219, 163, 95, 4, 220, 140, 70, 41, 134, 201, 146, 188, 248, 117, 84, 98, 87, 17, 48, 114, 169, 9, 193, 98, 247, 228, 112, 229, 129, 226, 120>>, proof: [ {<<103, 6, 113, 205, 151, 64, 65, 86, 34, 110, 80, 121, 115, 242, 171, 131, 48, 211, 2, 44, 169, 110, 12, 147, 189, 189, 179, 32, 196, 26, 220, 175>>, "R"} ], value: "3" } iex(3)> Bargad.Log.verify_audit_proof(tree, audit_proof) true iex(2)> consistency_proof = Bargad.Log.consistency_proof(tree, 1) [ <<63, 219, 163, 95, 4, 220, 140, 70, 41, 134, 201, 146, 188, 248, 117, 84, 98, 87, 17, 48, 114, 169, 9, 193, 98, 247, 228, 112, 229, 129, 226, 120>> ] ``` #### Integration Tests The integration tests can be found in the `./test/bargad_test.exs` file and can be run with the `mix test` command. ### Contributing 1. [Fork it!](https://github.com/ZanjeerPlatform/bargad/fork) 2. Create your feature branch (`git checkout -b my-new-feature`) 3. Commit your changes (`git commit -am 'Add some feature'`) 4. Push to the branch (`git push origin my-new-feature`) 5. Create new Pull Request ### Applications - **Certificate Transparency** - Bargad in Verifiable Log mode can implement the certificate transparency protocol mentioned in RFC6962. - **Blockchain** - Merkle trees and its derivatives form the basis of blockchains. Bargad in the Verifiable Log mode coupled with the multiple tree support can form the basis of a blockchain. - **Distributed Databases** - Databases use Merkle trees to efficiently synchronize replicas of a database. **Riak** and **Cassandra** are using merkle trees to successfully achieve this. Bargad can do this in the Verifiable Log mode by synchronsing two tree heads, one of which would be primary and the other out of date secondary. - **Secure and Distributed Filesystems** - **ZFS** by Oracle and InterPlanetary File System ( **IPFS** ) and peer to peer sharing networks like **BitTorrent** use merkle trees. ### Author <NAME> (@farazhaider) ### License See the license.md file for license details. """ use Application @doc false def start(_type, _args) do Bargad.Supervisor.start_link() end end
lib/bargad.ex
0.832407
0.96225
bargad.ex
starcoder
defmodule DartSass do @moduledoc """ DartSass is a installer and runner for [Sass](https://sass-lang.com/dart-sass). ## Profiles You can define multiple configuration profiles. By default, there is a profile called `:default` which you can configure its args, current directory and environment: config :dart_sass, version: "1.49.0", default: [ args: ~w(css/app.scss ../priv/static/assets/app.css), cd: Path.expand("../assets", __DIR__) ] ## Dart Sass configuration There are two global configurations for the `dart_sass` application: * `:version` - the expected Sass version. * `:path` - the path to the Sass executable. By default it is automatically downloaded and placed inside the `_build` directory of your current app. Note that if your system architecture requires a separate Dart VM executable to run, then `:path` should be defined as a list of absolute paths. Overriding the `:path` is not recommended, as we will automatically download and manage `sass` for you. But in case you can't download it (for example, the GitHub releases are behind a proxy), you may want to set the `:path` to a configurable system location. For instance, you can install `sass` globally with `npm`: $ npm install -g sass Then the executable will be at: NPM_ROOT/sass/sass.js Where `NPM_ROOT` is the result of `npm root -g`. Once you find the location of the executable, you can store it in a `MIX_SASS_PATH` environment variable, which you can then read in your configuration file: config :dart_sass, path: System.get_env("MIX_SASS_PATH") """ use Application require Logger @doc false def start(_, _) do unless Application.get_env(:dart_sass, :version) do Logger.warn(""" dart_sass version is not configured. Please set it in your config files: config :dart_sass, :version, "#{latest_version()}" """) end configured_version = configured_version() case bin_version() do {:ok, ^configured_version} -> :ok {:ok, version} -> Logger.warn(""" Outdated dart-sass version. Expected #{configured_version}, got #{version}. \ Please run `mix sass.install` or update the version in your config files.\ """) :error -> :ok end Supervisor.start_link([], strategy: :one_for_one) end @doc false # Latest known version at the time of publishing. def latest_version do "1.49.0" end @doc """ Returns the configured Sass version. """ def configured_version do Application.get_env(:dart_sass, :version, latest_version()) end @doc """ Returns the configuration for the given profile. Returns nil if the profile does not exist. """ def config_for!(profile) when is_atom(profile) do Application.get_env(:dart_sass, profile) || raise ArgumentError, """ unknown dart_sass profile. Make sure the profile is defined in your config files, such as: config :dart_sass, #{profile}: [ args: ~w(css/app.scss ../priv/static/assets/app.css), cd: Path.expand("../assets", __DIR__) ] """ end @doc """ Returns the path to the `sass` executable. Depending on your system target architecture, the path may be preceeded by the path to the Dart VM executable. """ def bin_path do platform = platform() cond do env_path = Application.get_env(:dart_sass, :path) -> List.wrap(env_path) Code.ensure_loaded?(Mix.Project) -> bin_path(platform, Path.dirname(Mix.Project.build_path())) true -> bin_path(platform, "_build") end end # TODO: Remove when dart-sass will exit when stdin is closed. @doc false def script_path() do Path.join(:code.priv_dir(:dart_sass), "dart_sass.bash") end @doc """ Returns the version of the Sass executable (or snapshot). Returns `{:ok, version_string}` on success or `:error` when the executable is not available. """ def bin_version do path = bin_path() with true <- path_exists?(path), {result, 0} <- cmd(path, ["--version"]) do {:ok, String.trim(result)} else _ -> :error end end defp cmd(path, args) do cmd(path, args, []) end defp cmd([command | args], extra_args, opts) do System.cmd(command, args ++ extra_args, opts) end @doc """ Runs the given command with `args`. The given args will be appended to the configured args. The task output will be streamed directly to stdio. It returns the status of the underlying call. """ def run(profile, extra_args) when is_atom(profile) and is_list(extra_args) do config = config_for!(profile) config_args = config[:args] || [] opts = [ cd: config[:cd] || File.cwd!(), env: config[:env] || %{}, into: IO.stream(:stdio, :line), stderr_to_stdout: true ] args = config_args ++ extra_args path = bin_path() # TODO: Remove when dart-sass will exit when stdin is closed. # Link: https://github.com/sass/dart-sass/pull/1411 path = if "--watch" in args and platform() != :windows do [script_path() | path] else path end path |> cmd(args, opts) |> elem(1) end @doc """ Installs, if not available, and then runs `sass`. Returns the same as `run/2`. """ def install_and_run(profile, args) do unless path_exists?(bin_path()) do install() end run(profile, args) end @doc """ Installs dart-sass with `configured_version/0`. """ def install do version = configured_version() tmp_opts = if System.get_env("MIX_XDG"), do: %{os: :linux}, else: %{} tmp_dir = freshdir_p(:filename.basedir(:user_cache, "cs-sass", tmp_opts)) || freshdir_p(Path.join(System.tmp_dir!(), "cs-sass")) || raise "could not install sass. Set MIX_XDG=1 and then set XDG_CACHE_HOME to the path you want to use as cache" platform = platform() name = "dart-sass-#{version}-#{target_extname(platform)}" url = "https://github.com/sass/dart-sass/releases/download/#{version}/#{name}" archive = fetch_body!(url) case unpack_archive(Path.extname(name), archive, tmp_dir) do :ok -> :ok other -> raise "couldn't unpack archive: #{inspect(other)}" end path = bin_path() case platform do :linux -> [sass | _] = path File.rm(sass) File.cp!(Path.join([tmp_dir, "dart-sass", "sass"]), sass) :macos -> [dart, snapshot | _] = path File.rm(dart) File.cp!(Path.join([tmp_dir, "dart-sass", "src", "dart"]), dart) File.rm(snapshot) File.cp!(Path.join([tmp_dir, "dart-sass", "src", "sass.snapshot"]), snapshot) :windows -> [dart, snapshot | _] = path File.rm(dart) File.cp!(Path.join([tmp_dir, "dart-sass", "src", "dart.exe"]), dart) File.rm(snapshot) File.cp!(Path.join([tmp_dir, "dart-sass", "src", "sass.snapshot"]), snapshot) end end defp bin_path(platform, base_path) do target = target(platform) case platform do :linux -> [Path.join(base_path, "sass-#{target}")] _ -> [ Path.join(base_path, "dart-#{target}"), Path.join(base_path, "sass.snapshot-#{target}") ] end end defp platform do case :os.type() do {:unix, :darwin} -> :macos {:unix, :linux} -> :linux {:unix, osname} -> raise "dart_sass is not available for osname: #{inspect(osname)}" {:win32, _} -> :windows end end defp path_exists?(path) do Enum.all?(path, &File.exists?/1) end defp freshdir_p(path) do with {:ok, _} <- File.rm_rf(path), :ok <- File.mkdir_p(path) do path else _ -> nil end end defp unpack_archive(".zip", zip, cwd) do with {:ok, _} <- :zip.unzip(zip, cwd: to_charlist(cwd)), do: :ok end defp unpack_archive(_, tar, cwd) do :erl_tar.extract({:binary, tar}, [:compressed, cwd: to_charlist(cwd)]) end defp target_extname(platform) do target = target(platform) case platform do :windows -> "#{target}.zip" _ -> "#{target}.tar.gz" end end # Available targets: https://github.com/sass/dart-sass/releases defp target(:windows) do case :erlang.system_info(:wordsize) * 8 do 32 -> "windows-ia32" 64 -> "windows-x64" end end defp target(platform) do arch_str = :erlang.system_info(:system_architecture) [arch | _] = arch_str |> List.to_string() |> String.split("-") # TODO: remove "arm" when we require OTP 24 arch = if platform == :macos and arch in ["aarch64", "arm"] do # Using Rosetta2 for M1 until sass/dart-sass runs native # Link: https://github.com/sass/dart-sass/issues/1125 "amd64" else arch end case arch do "amd64" -> "#{platform}-x64" "x86_64" -> "#{platform}-x64" "i686" -> "#{platform}-ia32" "i386" -> "#{platform}-ia32" _ -> raise "dart_sass not available for architecture: #{arch_str}" end end defp fetch_body!(url) do url = String.to_charlist(url) Logger.debug("Downloading dart-sass from #{url}") {:ok, _} = Application.ensure_all_started(:inets) {:ok, _} = Application.ensure_all_started(:ssl) if proxy = System.get_env("HTTP_PROXY") || System.get_env("http_proxy") do Logger.debug("Using HTTP_PROXY: #{proxy}") %{host: host, port: port} = URI.parse(proxy) :httpc.set_options([{:proxy, {{String.to_charlist(host), port}, []}}]) end if proxy = System.get_env("HTTPS_PROXY") || System.get_env("https_proxy") do Logger.debug("Using HTTPS_PROXY: #{proxy}") %{host: host, port: port} = URI.parse(proxy) :httpc.set_options([{:https_proxy, {{String.to_charlist(host), port}, []}}]) end # https://erlef.github.io/security-wg/secure_coding_and_deployment_hardening/inets cacertfile = CAStore.file_path() |> String.to_charlist() http_options = [ autoredirect: false, ssl: [ verify: :verify_peer, cacertfile: cacertfile, depth: 2, customize_hostname_check: [ match_fun: :public_key.pkix_verify_hostname_match_fun(:https) ] ] ] case :httpc.request(:get, {url, []}, http_options, []) do {:ok, {{_, 302, _}, headers, _}} -> {'location', download} = List.keyfind(headers, 'location', 0) options = [body_format: :binary] case :httpc.request(:get, {download, []}, http_options, options) do {:ok, {{_, 200, _}, _, body}} -> body other -> raise "couldn't fetch #{download}: #{inspect(other)}" end other -> raise "couldn't fetch #{url}: #{inspect(other)}" end end end
lib/dart_sass.ex
0.753467
0.586464
dart_sass.ex
starcoder
defmodule AWS.Backup do @moduledoc """ AWS Backup AWS Backup is a unified backup service designed to protect AWS services and their associated data. AWS Backup simplifies the creation, migration, restoration, and deletion of backups, while also providing reporting and auditing. """ @doc """ Creates a backup plan using a backup plan name and backup rules. A backup plan is a document that contains information that AWS Backup uses to schedule tasks that create recovery points for resources. If you call `CreateBackupPlan` with a plan that already exists, an `AlreadyExistsException` is returned. """ def create_backup_plan(client, input, options \\ []) do path_ = "/backup/plans/" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Creates a JSON document that specifies a set of resources to assign to a backup plan. Resources can be included by specifying patterns for a `ListOfTags` and selected `Resources`. For example, consider the following patterns: * `Resources: "arn:aws:ec2:region:account-id:volume/volume-id"` * `ConditionKey:"department"` `ConditionValue:"finance"` `ConditionType:"StringEquals"` * `ConditionKey:"importance"` `ConditionValue:"critical"` `ConditionType:"StringEquals"` Using these patterns would back up all Amazon Elastic Block Store (Amazon EBS) volumes that are tagged as `"department=finance"`, `"importance=critical"`, in addition to an EBS volume with the specified volume ID. Resources and conditions are additive in that all resources that match the pattern are selected. This shouldn't be confused with a logical AND, where all conditions must match. The matching patterns are logically put together using the OR operator. In other words, all patterns that match are selected for backup. """ def create_backup_selection(client, backup_plan_id, input, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/selections/" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Creates a logical container where backups are stored. A `CreateBackupVault` request includes a name, optionally one or more resource tags, an encryption key, and a request ID. Sensitive data, such as passport numbers, should not be included the name of a backup vault. """ def create_backup_vault(client, backup_vault_name, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Deletes a backup plan. A backup plan can only be deleted after all associated selections of resources have been deleted. Deleting a backup plan deletes the current version of a backup plan. Previous versions, if any, will still exist. """ def delete_backup_plan(client, backup_plan_id, input, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes the resource selection associated with a backup plan that is specified by the `SelectionId`. """ def delete_backup_selection(client, backup_plan_id, selection_id, input, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/selections/#{URI.encode(selection_id)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes the backup vault identified by its name. A vault can be deleted only if it is empty. """ def delete_backup_vault(client, backup_vault_name, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes the policy document that manages permissions on a backup vault. """ def delete_backup_vault_access_policy(client, backup_vault_name, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/access-policy" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes event notifications for the specified backup vault. """ def delete_backup_vault_notifications(client, backup_vault_name, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/notification-configuration" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes the recovery point specified by a recovery point ID. """ def delete_recovery_point(client, backup_vault_name, recovery_point_arn, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Returns backup job details for the specified `BackupJobId`. """ def describe_backup_job(client, backup_job_id, options \\ []) do path_ = "/backup-jobs/#{URI.encode(backup_job_id)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns metadata about a backup vault specified by its name. """ def describe_backup_vault(client, backup_vault_name, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns metadata associated with creating a copy of a resource. """ def describe_copy_job(client, copy_job_id, options \\ []) do path_ = "/copy-jobs/#{URI.encode(copy_job_id)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns information about a saved resource, including the last time it was backed up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource. """ def describe_protected_resource(client, resource_arn, options \\ []) do path_ = "/resources/#{URI.encode(resource_arn)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns metadata associated with a recovery point, including ID, status, encryption, and lifecycle. """ def describe_recovery_point(client, backup_vault_name, recovery_point_arn, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns the current service opt-in settings for the Region. If the service has a value set to `true`, AWS Backup tries to protect that service's resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to `false` for a service, AWS Backup does not try to protect that service's resources in this Region. """ def describe_region_settings(client, options \\ []) do path_ = "/account-settings" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns metadata associated with a restore job that is specified by a job ID. """ def describe_restore_job(client, restore_job_id, options \\ []) do path_ = "/restore-jobs/#{URI.encode(restore_job_id)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns the backup plan that is specified by the plan ID as a backup template. """ def export_backup_plan_template(client, backup_plan_id, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/toTemplate/" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns `BackupPlan` details for the specified `BackupPlanId`. Returns the body of a backup plan in JSON format, in addition to plan metadata. """ def get_backup_plan(client, backup_plan_id, version_id \\ nil, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/" headers = [] query_ = [] query_ = if !is_nil(version_id) do [{"versionId", version_id} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a valid JSON document specifying a backup plan or an error. """ def get_backup_plan_from_j_s_o_n(client, input, options \\ []) do path_ = "/backup/template/json/toPlan" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Returns the template specified by its `templateId` as a backup plan. """ def get_backup_plan_from_template(client, backup_plan_template_id, options \\ []) do path_ = "/backup/template/plans/#{URI.encode(backup_plan_template_id)}/toPlan" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns selection metadata and a document in JSON format that specifies a list of resources that are associated with a backup plan. """ def get_backup_selection(client, backup_plan_id, selection_id, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/selections/#{URI.encode(selection_id)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns the access policy document that is associated with the named backup vault. """ def get_backup_vault_access_policy(client, backup_vault_name, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/access-policy" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns event notifications for the specified backup vault. """ def get_backup_vault_notifications(client, backup_vault_name, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/notification-configuration" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a set of metadata key-value pairs that were used to create the backup. """ def get_recovery_point_restore_metadata(client, backup_vault_name, recovery_point_arn, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}/restore-metadata" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns the AWS resource types supported by AWS Backup. """ def get_supported_resource_types(client, options \\ []) do path_ = "/supported-resource-types" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of existing backup jobs for an authenticated account. """ def list_backup_jobs(client, by_account_id \\ nil, by_backup_vault_name \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_resource_arn \\ nil, by_resource_type \\ nil, by_state \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup-jobs/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end query_ = if !is_nil(by_state) do [{"state", by_state} | query_] else query_ end query_ = if !is_nil(by_resource_type) do [{"resourceType", by_resource_type} | query_] else query_ end query_ = if !is_nil(by_resource_arn) do [{"resourceArn", by_resource_arn} | query_] else query_ end query_ = if !is_nil(by_created_before) do [{"createdBefore", by_created_before} | query_] else query_ end query_ = if !is_nil(by_created_after) do [{"createdAfter", by_created_after} | query_] else query_ end query_ = if !is_nil(by_backup_vault_name) do [{"backupVaultName", by_backup_vault_name} | query_] else query_ end query_ = if !is_nil(by_account_id) do [{"accountId", by_account_id} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns metadata of your saved backup plan templates, including the template ID, name, and the creation and deletion dates. """ def list_backup_plan_templates(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup/template/plans" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns version metadata of your backup plans, including Amazon Resource Names (ARNs), backup plan IDs, creation and deletion dates, plan names, and version IDs. """ def list_backup_plan_versions(client, backup_plan_id, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/versions/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of existing backup plans for an authenticated account. The list is populated only if the advanced option is set for the backup plan. The list contains information such as Amazon Resource Names (ARNs), plan IDs, creation and deletion dates, version IDs, plan names, and creator request IDs. """ def list_backup_plans(client, include_deleted \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup/plans/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end query_ = if !is_nil(include_deleted) do [{"includeDeleted", include_deleted} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns an array containing metadata of the resources associated with the target backup plan. """ def list_backup_selections(client, backup_plan_id, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}/selections/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of recovery point storage containers along with information about them. """ def list_backup_vaults(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup-vaults/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns metadata about your copy jobs. """ def list_copy_jobs(client, by_account_id \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_destination_vault_arn \\ nil, by_resource_arn \\ nil, by_resource_type \\ nil, by_state \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/copy-jobs/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end query_ = if !is_nil(by_state) do [{"state", by_state} | query_] else query_ end query_ = if !is_nil(by_resource_type) do [{"resourceType", by_resource_type} | query_] else query_ end query_ = if !is_nil(by_resource_arn) do [{"resourceArn", by_resource_arn} | query_] else query_ end query_ = if !is_nil(by_destination_vault_arn) do [{"destinationVaultArn", by_destination_vault_arn} | query_] else query_ end query_ = if !is_nil(by_created_before) do [{"createdBefore", by_created_before} | query_] else query_ end query_ = if !is_nil(by_created_after) do [{"createdAfter", by_created_after} | query_] else query_ end query_ = if !is_nil(by_account_id) do [{"accountId", by_account_id} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns an array of resources successfully backed up by AWS Backup, including the time the resource was saved, an Amazon Resource Name (ARN) of the resource, and a resource type. """ def list_protected_resources(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/resources/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns detailed information about the recovery points stored in a backup vault. """ def list_recovery_points_by_backup_vault(client, backup_vault_name, by_backup_plan_id \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_resource_arn \\ nil, by_resource_type \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end query_ = if !is_nil(by_resource_type) do [{"resourceType", by_resource_type} | query_] else query_ end query_ = if !is_nil(by_resource_arn) do [{"resourceArn", by_resource_arn} | query_] else query_ end query_ = if !is_nil(by_created_before) do [{"createdBefore", by_created_before} | query_] else query_ end query_ = if !is_nil(by_created_after) do [{"createdAfter", by_created_after} | query_] else query_ end query_ = if !is_nil(by_backup_plan_id) do [{"backupPlanId", by_backup_plan_id} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns detailed information about recovery points of the type specified by a resource Amazon Resource Name (ARN). """ def list_recovery_points_by_resource(client, resource_arn, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/resources/#{URI.encode(resource_arn)}/recovery-points/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of jobs that AWS Backup initiated to restore a saved resource, including metadata about the recovery process. """ def list_restore_jobs(client, by_account_id \\ nil, by_created_after \\ nil, by_created_before \\ nil, by_status \\ nil, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/restore-jobs/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end query_ = if !is_nil(by_status) do [{"status", by_status} | query_] else query_ end query_ = if !is_nil(by_created_before) do [{"createdBefore", by_created_before} | query_] else query_ end query_ = if !is_nil(by_created_after) do [{"createdAfter", by_created_after} | query_] else query_ end query_ = if !is_nil(by_account_id) do [{"accountId", by_account_id} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of key-value pairs assigned to a target recovery point, backup plan, or backup vault. `ListTags` are currently only supported with Amazon EFS backups. """ def list_tags(client, resource_arn, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/tags/#{URI.encode(resource_arn)}/" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"nextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"maxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Sets a resource-based policy that is used to manage access permissions on the target backup vault. Requires a backup vault name and an access policy document in JSON format. """ def put_backup_vault_access_policy(client, backup_vault_name, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/access-policy" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Turns on notifications on a backup vault for the specified topic and events. """ def put_backup_vault_notifications(client, backup_vault_name, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/notification-configuration" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Starts an on-demand backup job for the specified resource. """ def start_backup_job(client, input, options \\ []) do path_ = "/backup-jobs" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Starts a job to create a one-time copy of the specified resource. """ def start_copy_job(client, input, options \\ []) do path_ = "/copy-jobs" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Recovers the saved resource identified by an Amazon Resource Name (ARN). If the resource ARN is included in the request, then the last complete backup of that resource is recovered. If the ARN of a recovery point is supplied, then that recovery point is restored. """ def start_restore_job(client, input, options \\ []) do path_ = "/restore-jobs" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @doc """ Attempts to cancel a job to create a one-time backup of a resource. """ def stop_backup_job(client, backup_job_id, input, options \\ []) do path_ = "/backup-jobs/#{URI.encode(backup_job_id)}" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN). """ def tag_resource(client, resource_arn, input, options \\ []) do path_ = "/tags/#{URI.encode(resource_arn)}" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Removes a set of key-value pairs from a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN) """ def untag_resource(client, resource_arn, input, options \\ []) do path_ = "/untag/#{URI.encode(resource_arn)}" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Updates an existing backup plan identified by its `backupPlanId` with the input document in JSON format. The new version is uniquely identified by a `VersionId`. """ def update_backup_plan(client, backup_plan_id, input, options \\ []) do path_ = "/backup/plans/#{URI.encode(backup_plan_id)}" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Sets the transition lifecycle of a recovery point. The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. """ def update_recovery_point_lifecycle(client, backup_vault_name, recovery_point_arn, input, options \\ []) do path_ = "/backup-vaults/#{URI.encode(backup_vault_name)}/recovery-points/#{URI.encode(recovery_point_arn)}" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Updates the current service opt-in settings for the Region. If the service has a value set to `true`, AWS Backup tries to protect that service's resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to `false` for a service, AWS Backup does not try to protect that service's resources in this Region. """ def update_region_settings(client, input, options \\ []) do path_ = "/account-settings" headers = [] query_ = [] request(client, :put, path_, query_, headers, input, options, nil) end @spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) :: {:ok, map() | nil, map()} | {:error, term()} defp request(client, method, path, query, headers, input, options, success_status_code) do client = %{client | service: "backup"} host = build_host("backup", client) url = host |> build_url(path, client) |> add_query(query, client) additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}] headers = AWS.Request.add_headers(additional_headers, headers) payload = encode!(client, input) headers = AWS.Request.sign_v4(client, method, url, headers, payload) perform_request(client, method, url, payload, headers, options, success_status_code) end defp perform_request(client, method, url, payload, headers, options, success_status_code) do case AWS.Client.request(client, method, url, payload, headers, options) do {:ok, %{status_code: status_code, body: body} = response} when is_nil(success_status_code) and status_code in [200, 202, 204] when status_code == success_status_code -> body = if(body != "", do: decode!(client, body)) {:ok, body, response} {:ok, response} -> {:error, {:unexpected_response, response}} error = {:error, _reason} -> error end end defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do endpoint end defp build_host(_endpoint_prefix, %{region: "local"}) do "localhost" end defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do "#{endpoint_prefix}.#{region}.#{endpoint}" end defp build_url(host, path, %{:proto => proto, :port => port}) do "#{proto}://#{host}:#{port}#{path}" end defp add_query(url, [], _client) do url end defp add_query(url, query, client) do querystring = encode!(client, query, :query) "#{url}?#{querystring}" end defp encode!(client, payload, format \\ :json) do AWS.Client.encode!(client, payload, format) end defp decode!(client, payload) do AWS.Client.decode!(client, payload, :json) end end
lib/aws/generated/backup.ex
0.882105
0.477371
backup.ex
starcoder
defmodule Mix.Tasks.Vessel.Compile do # mix task use Mix.Task # different Vessel build phases @phases [ :mapper, :combiner, :reducer ] # information for the mix command line @shortdoc "Compiles any designated Vessel binaries" @recursive true @moduledoc """ Compiles a set of Vessel binaries. The binaries to compile are dictated by the `:vessel` property inside the Mix project definition. The property should be a Keyword List containing a subset of the keys `:mapper`, `:combiner` and `:reducer`. Each of these keys may have options provided as to where to build the binary to, and the module which should act as the entry point: [ mapper: [ module: MyMapper, target: "./binaries/mapper" ] ] If you don't provide an `:target` property, it will be placed in `target/<ver>` with a name of the form `{app_name}-{type}`, for example `my_app-mapper`. If you do not wish to customise the output, you can just set the properties as an Atom and it will be used as the module name: [ mapper: MyMapper ] # unpacks to [ mapper: [ module: MyMapper ] ] If your module name is not provided, the binary will be ignored - however if your module is invalid, an error will be raised. """ def run(args) do Mix.Project.get! Mix.Task.run("compile", args) project = Mix.Project.config() vessel = Keyword.get(project, :vessel, []) for phase <- @phases do opts = case Keyword.get(vessel, phase) do nil -> [] val -> ensure_opts!(val, phase) end case Keyword.get(opts, :module) do nil -> nil mod -> build!(project, mod, opts, phase) end end end # Carries out a build for a given phase using the provided module definition # and options. This is basically a delegation function to `Exscript` under the # hood, just with a couple of options defined automatically (e.g. target). defp build!(project, module, options, phase) do ensure_module!(module, phase) app = Keyword.get(project, :app) ver = Keyword.get(project, :version) out = Keyword.get(options, :target, "./rel/v#{ver}/#{app}-#{phase}") name = String.to_atom("#{app}_#{phase}") opts = [ app: name, path: out, main_module: module ] Exscript.escriptize(project, :elixir, opts, true, true) end # Verifies that a module designation is a correct Vessel module and exists in # the bounds of a compilation pass. If it does not, we raise an error with a # prompt about Vessel inheritance, in case they simply missed the `use` clause. defp ensure_module!(module, phase) do loaded? = Code.ensure_loaded?(module) vessel? = :erlang.function_exported(module, :main, 1) unless loaded? and vessel? do hmod = String.trim_leading("#{module}", "Elixir.") hves = vessel_type(phase) Mix.raise("Could not generate Vessel binary, please ensure that " <> "#{hmod} exists and correctly includes `use #{hves}`") end end # Validates the format of options provided for a Vessel phase. We convert any # lone Atoms to a List, against the `:module` key. Any Lists are kept as-is, # and anything else will raise an error message. defp ensure_opts!(val, _phase) when is_atom(val), do: [ module: val ] defp ensure_opts!(val, _phase) when is_list(val), do: val defp ensure_opts!(_val, phase), do: Mix.raise("Invalid Vessel option provided for key :#{phase}") # Converts a phase name to the correct Elixir module name representation. This # will convert `:combiner` to `Mapper.Reducer` as combiners inherit Reducers. defp vessel_type(:mapper), do: "Vessel.Mapper" defp vessel_type(_module), do: "Vessel.Reducer" end
lib/mix/tasks/vessel.compile.ex
0.882054
0.527438
vessel.compile.ex
starcoder
defmodule ETS.Bag do @moduledoc """ Module for creating and interacting with :ets tables of the type `:bag` and `:duplicate_bag`. Bags contain "records" which are tuples. Bags are configured with a key position via the `keypos: integer` option. If not specified, the default key position is 1. The element of the tuple record at the key position is that records key. For example, setting the `keypos` to 2 means the key of an inserted record `{:a, :b}` is `:b`: iex> {:ok, bag} = Bag.new(keypos: 2) iex> Bag.add!(bag, {:a, :b}) iex> Bag.lookup(bag, :a) {:ok, []} iex> Bag.lookup(bag, :b) {:ok, [{:a, :b}]} When a record is added to the table with `add_new` will only add the record if a matching key doesn't already exist. ## Examples iex> {:ok, bag} = Bag.new() iex> Bag.add_new!(bag, {:a, :b, :c}) iex> Bag.to_list!(bag) [{:a, :b, :c}] iex> Bag.add_new!(bag, {:d, :e, :f}) iex> Bag.to_list!(bag) [{:d, :e, :f}, {:a, :b, :c}] iex> Bag.add_new!(bag, {:a, :g, :h}) iex> Bag.to_list!(bag) [{:d, :e, :f}, {:a, :b, :c}] `add` and `add_new` take either a single tuple or a list of tuple records. When adding multiple records, they are added in an atomic an isolated manner. `add_new` doesn't add any records if any of the new keys already exist in the bag. By default, Bags allow duplicate records (each element of the tuple record is identical). To prevent duplicate records, set the `duplicate: false` opt when creating the Bag (if you want to prevent duplicate *keys*, use an `ETS.Set` instead). Note that `duplicate: false` will increase the time it takes to add records as the table must be checked for duplicates prior to insert. `duplicate: true` maps to the `:ets` table type `:duplicate_bag`, `duplicate: false` maps to `:bag`. ## Working with named tables The functions on `ETS.Bag` require that you pass in an `ETS.Bag` as the first argument. In some design patterns, you may have the table name but an instance of an `ETS.Bag` may not be available to you. If this is the case, you should use `wrap_existing/1` to turn your table name atom into an `ETS.Bag`. For example, a `GenServer` that handles writes within the server, but reads in the client process would be implemented like this: ``` defmodule MyExampleGenServer do use GenServer alias ETS.Bag # Client Functions def get_roles_for_user(user_id) do :my_role_table |> Bag.wrap_existing!() |> Bag.lookup!(user_id) |> Enum.map(&elem(&1, 1)) end def add_role_for_user(user_id, role) do GenServer.call(__MODULE__, {:add_role_for_user, user_id, role}) end # Server Functions def init(_) do {:ok, %{bag: Bag.new!(name: :my_role_table)}} end def handle_call({:add_role_for_user, user_id, role}, _from, %{bag: bag}) do Bag.add(bag, {user_id, role}) end end ``` """ use ETS.Utils alias ETS.{ Bag, Base } @type t :: %__MODULE__{ info: keyword(), duplicate: boolean(), table: ETS.table_reference() } @type bag_options :: [ETS.Base.option() | {:duplicate, boolean()}] defstruct table: nil, info: nil, duplicate: nil @doc """ Creates new bag module with the specified options. Note that the underlying :ets table will be attached to the process that calls `new` and will be destroyed if that process dies. Possible options: * `name:` when specified, creates a named table with the specified name * `duplicate:` when true, allows multiple identical records. (default true) * `protection:` :private, :protected, :public (default :protected) * `heir:` :none | {heir_pid, heir_data} (default :none) * `keypos:` integer (default 1) * `read_concurrency:` boolean (default false) * `write_concurrency:` boolean (default false) * `compressed:` boolean (default false) ## Examples iex> {:ok, bag} = Bag.new(duplicate: false, keypos: 3, read_concurrency: true, compressed: false) iex> Bag.info!(bag)[:read_concurrency] true # Named :ets tables via the name keyword iex> {:ok, bag} = Bag.new(name: :my_ets_table) iex> Bag.info!(bag)[:name] :my_ets_table """ @spec new(bag_options) :: {:error, any()} | {:ok, Bag.t()} def new(opts \\ []) when is_list(opts) do {opts, duplicate} = take_opt(opts, :duplicate, true) if is_boolean(duplicate) do case Base.new_table(type(duplicate), opts) do {:error, reason} -> {:error, reason} {:ok, {table, info}} -> {:ok, %Bag{table: table, info: info, duplicate: duplicate}} end else {:error, {:invalid_option, {:duplicate, duplicate}}} end end @doc """ Same as `new/1` but unwraps or raises on error. """ @spec new!(bag_options) :: Bag.t() def new!(opts \\ []), do: unwrap_or_raise(new(opts)) defp type(true), do: :duplicate_bag defp type(false), do: :bag @doc """ Returns information on the bag. Second parameter forces updated information from ets, default (false) uses in-struct cached information. Force should be used when requesting size and memory. ## Examples iex> {:ok, bag} = Bag.new(duplicate: false, keypos: 3, read_concurrency: true, compressed: false) iex> {:ok, info} = Bag.info(bag) iex> info[:read_concurrency] true iex> {:ok, _} = Bag.add(bag, {:a, :b, :c}) iex> {:ok, info} = Bag.info(bag) iex> info[:size] 0 iex> {:ok, info} = Bag.info(bag, true) iex> info[:size] 1 """ @spec info(Bag.t(), boolean()) :: {:ok, keyword()} | {:error, any()} def info(bag, force_update \\ false) def info(%Bag{table: table}, true), do: Base.info(table) def info(%Bag{info: info}, false), do: {:ok, info} @doc """ Same as `info/1` but unwraps or raises on error. """ @spec info!(Bag.t(), boolean()) :: keyword() def info!(%Bag{} = bag, force_update \\ false) when is_boolean(force_update), do: unwrap_or_raise(info(bag, force_update)) @doc """ Returns underlying `:ets` table reference. For use in functions that are not yet implemented. If you find yourself using this, please consider submitting a PR to add the necessary function to `ETS`. ## Examples iex> bag = Bag.new!(name: :my_ets_table) iex> {:ok, table} = Bag.get_table(bag) iex> info = :ets.info(table) iex> info[:name] :my_ets_table """ @spec get_table(Bag.t()) :: {:ok, ETS.table_reference()} def get_table(%Bag{table: table}), do: {:ok, table} @doc """ Same as `get_table/1` but unwraps or raises on error """ @spec get_table!(Bag.t()) :: ETS.table_reference() def get_table!(%Bag{} = bag), do: unwrap(get_table(bag)) @doc """ Adds tuple record or list of tuple records to table. If Bag has `duplicate: false`, will overwrite duplicate records (full tuple must match, not just key). Inserts multiple records in an [atomic and isolated](http://erlang.org/doc/man/ets.html#concurrency) manner. ## Examples iex> {:ok, bag} = Bag.new() iex> {:ok, _} = Bag.add(bag, [{:a, :b, :c}, {:d, :e, :f}]) iex> {:ok, _} = Bag.add(bag, {:a, :h, :i}) iex> {:ok, _} = Bag.add(bag, {:d, :x, :y}) iex> {:ok, _} = Bag.add(bag, {:d, :e, :f}) iex> Bag.to_list(bag) {:ok, [{:d, :e, :f}, {:d, :x, :y}, {:d, :e, :f}, {:a, :b, :c}, {:a, :h, :i}]} iex> {:ok, bag} = Bag.new(duplicate: false) iex> {:ok, _} = Bag.add(bag, [{:a, :b, :c}, {:d, :e, :f}]) iex> {:ok, _} = Bag.add(bag, {:a, :h, :i}) iex> {:ok, _} = Bag.add(bag, {:d, :x, :y}) iex> {:ok, _} = Bag.add(bag, {:d, :e, :f}) # won't insert due to duplicate: false iex> Bag.to_list(bag) {:ok, [{:d, :e, :f}, {:d, :x, :y}, {:a, :b, :c}, {:a, :h, :i}]} """ @spec add(Bag.t(), tuple() | list(tuple())) :: {:ok, Bag.t()} | {:error, any()} def add(%Bag{table: table} = bag, record) when is_tuple(record), do: Base.insert(table, record, bag) def add(%Bag{table: table} = bag, records) when is_list(records), do: Base.insert_multi(table, records, bag) @doc """ Same as `add/3` but unwraps or raises on error. """ @spec add!(Bag.t(), tuple() | list(tuple())) :: Bag.t() def add!(%Bag{} = bag, record_or_records) when is_tuple(record_or_records) or is_list(record_or_records), do: unwrap_or_raise(add(bag, record_or_records)) @doc """ Same as `add/2` but doesn't add any records if one of the given keys already exists. ## Examples iex> bag = Bag.new!() iex> {:ok, _} = Bag.add_new(bag, [{:a, :b, :c}, {:d, :e, :f}]) iex> {:ok, _} = Bag.add_new(bag, [{:a, :x, :y}, {:g, :h, :i}]) # skips due to duplicate :a key iex> {:ok, _} = Bag.add_new(bag, {:d, :z, :zz}) # skips due to duplicate :d key iex> Bag.to_list!(bag) [{:d, :e, :f}, {:a, :b, :c}] """ @spec add_new(Bag.t(), tuple() | list(tuple())) :: {:ok, Bag.t()} | {:error, any()} def add_new(%Bag{table: table} = bag, record) when is_tuple(record), do: Base.insert_new(table, record, bag) def add_new(%Bag{table: table} = bag, records) when is_list(records), do: Base.insert_multi_new(table, records, bag) @doc """ Same as `add_new/2` but unwraps or raises on error. """ @spec add_new!(Bag.t(), tuple() | list(tuple())) :: Bag.t() def add_new!(%Bag{} = bag, record_or_records) when is_tuple(record_or_records) or is_list(record_or_records), do: unwrap_or_raise(add_new(bag, record_or_records)) @doc """ Returns list of records with specified key. ## Examples iex> Bag.new!() iex> |> Bag.add!({:a, :b, :c}) iex> |> Bag.add!({:d, :e, :f}) iex> |> Bag.add!({:d, :e, :g}) iex> |> Bag.lookup(:d) {:ok, [{:d, :e, :f}, {:d, :e, :g}]} """ @spec lookup(Bag.t(), any()) :: {:ok, [tuple()]} | {:error, any()} def lookup(%Bag{table: table}, key), do: Base.lookup(table, key) @doc """ Same as `lookup/3` but unwraps or raises on error. """ @spec lookup!(Bag.t(), any()) :: [tuple()] def lookup!(%Bag{} = bag, key), do: unwrap_or_raise(lookup(bag, key)) @doc """ Returns list of elements in specified position of records with specified key. ## Examples iex> Bag.new!() iex> |> Bag.add!({:a, :b, :c}) iex> |> Bag.add!({:d, :e, :f}) iex> |> Bag.add!({:d, :h, :i}) iex> |> Bag.lookup_element(:d, 2) {:ok, [:e, :h]} """ @spec lookup_element(Bag.t(), any(), non_neg_integer()) :: {:ok, [any()]} | {:error, any()} def lookup_element(%Bag{table: table}, key, pos), do: Base.lookup_element(table, key, pos) @doc """ Same as `lookup_element/3` but unwraps or raises on error. """ @spec lookup_element!(Bag.t(), any(), non_neg_integer()) :: [any()] def lookup_element!(%Bag{} = bag, key, pos), do: unwrap_or_raise(lookup_element(bag, key, pos)) @doc """ Returns records in the Bag that match the specified pattern. For more information on the match pattern, see the [erlang documentation](http://erlang.org/doc/man/ets.html#match-2) ## Examples iex> Bag.new!() iex> |> Bag.add!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :i, :j}]) iex> |> Bag.match({:"$1", :b, :"$2", :_}) {:ok, [[:h, :i], [:a, :c]]} """ @spec match(Bag.t(), ETS.match_pattern()) :: {:ok, [tuple()]} | {:error, any()} def match(%Bag{table: table}, pattern) when is_atom(pattern) or is_tuple(pattern), do: Base.match(table, pattern) @doc """ Same as `match/2` but unwraps or raises on error. """ @spec match!(Bag.t(), ETS.match_pattern()) :: [tuple()] def match!(%Bag{} = bag, pattern) when is_atom(pattern) or is_tuple(pattern), do: unwrap_or_raise(match(bag, pattern)) @doc """ Same as `match/2` but limits number of results to the specified limit. ## Examples iex> bag = Bag.new!() iex> Bag.add!(bag, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :b, :i, :j}]) iex> {:ok, {results, _continuation}} = Bag.match(bag, {:"$1", :b, :"$2", :_}, 2) iex> results [[:e, :f], [:a, :c]] """ @spec match(Bag.t(), ETS.match_pattern(), non_neg_integer()) :: {:ok, {[tuple()], any() | :end_of_table}} | {:error, any()} def match(%Bag{table: table}, pattern, limit), do: Base.match(table, pattern, limit) @doc """ Same as `match/3` but unwraps or raises on error. """ @spec match!(Bag.t(), ETS.match_pattern(), non_neg_integer()) :: {[tuple()], any() | :end_of_table} def match!(%Bag{} = bag, pattern, limit), do: unwrap_or_raise(match(bag, pattern, limit)) @doc """ Matches next bag of records from a match/3 or match/1 continuation. ## Examples iex> bag = Bag.new!() iex> Bag.add!(bag, [{:a, :b, :c, :d}, {:e, :b, :f, :g}, {:h, :b, :i, :j}]) iex> {:ok, {results, continuation}} = Bag.match(bag, {:"$1", :b, :"$2", :_}, 2) iex> results [[:e, :f], [:a, :c]] iex> {:ok, {records2, continuation2}} = Bag.match(continuation) iex> records2 [[:h, :i]] iex> continuation2 :end_of_table """ @spec match(any()) :: {:ok, {[tuple()], any() | :end_of_table}} | {:error, any()} def match(continuation), do: Base.match(continuation) @doc """ Same as `match/1` but unwraps or raises on error. """ @spec match!(any()) :: {[tuple()], any() | :end_of_table} def match!(continuation), do: unwrap_or_raise(match(continuation)) @doc """ Returns records in the specified Bag that match the specified match specification. For more information on the match specification, see the [erlang documentation](http://erlang.org/doc/man/ets.html#select-2) ## Examples iex> Bag.new!() iex> |> Bag.add!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :i, :j}]) iex> |> Bag.select([{{:"$1", :b, :"$2", :_},[],[:"$$"]}]) {:ok, [[:h, :i], [:a, :c]]} """ @spec select(Bag.t(), ETS.match_spec()) :: {:ok, [tuple()]} | {:error, any()} def select(%Bag{table: table}, spec) when is_list(spec), do: Base.select(table, spec) @doc """ Same as `select/2` but unwraps or raises on error. """ @spec select!(Bag.t(), ETS.match_spec()) :: [tuple()] def select!(%Bag{} = bag, spec) when is_list(spec), do: unwrap_or_raise(select(bag, spec)) @doc """ Deletes records in the specified Bag that match the specified match specification. For more information on the match specification, see the [erlang documentation](http://erlang.org/doc/man/ets.html#select_delete-2) ## Examples iex> bag = Bag.new!() iex> bag iex> |> Bag.add!([{:a, :b, :c, :d}, {:e, :c, :f, :g}, {:h, :b, :c, :h}]) iex> |> Bag.select_delete([{{:"$1", :b, :"$2", :_},[{:"==", :"$2", :c}],[true]}]) {:ok, 2} iex> Bag.to_list!(bag) [{:e, :c, :f, :g}] """ @spec select_delete(Bag.t(), ETS.match_spec()) :: {:ok, non_neg_integer()} | {:error, any()} def select_delete(%Bag{table: table}, spec) when is_list(spec), do: Base.select_delete(table, spec) @doc """ Same as `select_delete/2` but unwraps or raises on error. """ @spec select_delete!(Bag.t(), ETS.match_spec()) :: non_neg_integer() def select_delete!(%Bag{} = bag, spec) when is_list(spec), do: unwrap_or_raise(select_delete(bag, spec)) @doc """ Determines if specified key exists in specified bag. ## Examples iex> bag = Bag.new!() iex> Bag.has_key(bag, :key) {:ok, false} iex> Bag.add(bag, {:key, :value}) iex> Bag.has_key(bag, :key) {:ok, true} """ @spec has_key(Bag.t(), any()) :: {:ok, boolean()} | {:error, any()} def has_key(%Bag{table: table}, key), do: Base.has_key(table, key) @doc """ Same as `has_key/2` but unwraps or raises on error. """ @spec has_key!(Bag.t(), any()) :: boolean() def has_key!(bag, key), do: unwrap_or_raise(has_key(bag, key)) @doc """ Returns contents of table as a list. ## Examples iex> Bag.new!() iex> |> Bag.add!({:a, :b, :c}) iex> |> Bag.add!({:d, :e, :f}) iex> |> Bag.add!({:d, :e, :f}) iex> |> Bag.to_list() {:ok, [{:d, :e, :f}, {:d, :e, :f}, {:a, :b, :c}]} """ @spec to_list(Bag.t()) :: {:ok, [tuple()]} | {:error, any()} def to_list(%Bag{table: table}), do: Base.to_list(table) @doc """ Same as `to_list/1` but unwraps or raises on error. """ @spec to_list!(Bag.t()) :: [tuple()] def to_list!(%Bag{} = bag), do: unwrap_or_raise(to_list(bag)) @doc """ Deletes specified Bag. ## Examples iex> {:ok, bag} = Bag.new() iex> {:ok, _} = Bag.info(bag, true) iex> {:ok, _} = Bag.delete(bag) iex> Bag.info(bag, true) {:error, :table_not_found} """ @spec delete(Bag.t()) :: {:ok, Bag.t()} | {:error, any()} def delete(%Bag{table: table} = bag), do: Base.delete(table, bag) @doc """ Same as `delete/1` but unwraps or raises on error. """ @spec delete!(Bag.t()) :: Bag.t() def delete!(%Bag{} = bag), do: unwrap_or_raise(delete(bag)) @doc """ Deletes record with specified key in specified Bag. ## Examples iex> bag = Bag.new!() iex> Bag.add(bag, {:a, :b, :c}) iex> Bag.delete(bag, :a) iex> Bag.lookup!(bag, :a) [] """ @spec delete(Bag.t(), any()) :: {:ok, Bag.t()} | {:error, any()} def delete(%Bag{table: table} = bag, key), do: Base.delete_records(table, key, bag) @doc """ Same as `delete/2` but unwraps or raises on error. """ @spec delete!(Bag.t(), any()) :: Bag.t() def delete!(%Bag{} = bag, key), do: unwrap_or_raise(delete(bag, key)) @doc """ Deletes all records in specified Bag. ## Examples iex> bag = Bag.new!() iex> bag iex> |> Bag.add!({:a, :b, :c}) iex> |> Bag.add!({:b, :b, :c}) iex> |> Bag.add!({:c, :b, :c}) iex> |> Bag.to_list!() [{:c, :b, :c}, {:b, :b, :c}, {:a, :b, :c}] iex> Bag.delete_all(bag) iex> Bag.to_list!(bag) [] """ @spec delete_all(Bag.t()) :: {:ok, Bag.t()} | {:error, any()} def delete_all(%Bag{table: table} = bag), do: Base.delete_all_records(table, bag) @doc """ Same as `delete_all/1` but unwraps or raises on error. """ @spec delete_all!(Bag.t()) :: Bag.t() def delete_all!(%Bag{} = bag), do: unwrap_or_raise(delete_all(bag)) @doc """ Wraps an existing :ets :bag or :duplicate_bag in a Bag struct. ## Examples iex> :ets.new(:my_ets_table, [:bag, :named_table]) iex> {:ok, bag} = Bag.wrap_existing(:my_ets_table) iex> Bag.info!(bag)[:name] :my_ets_table """ @spec wrap_existing(ETS.table_identifier()) :: {:ok, Bag.t()} | {:error, any()} def wrap_existing(table_identifier) do case Base.wrap_existing(table_identifier, [:bag, :duplicate_bag]) do {:ok, {table, info}} -> {:ok, %Bag{table: table, info: info, duplicate: info[:type] == :duplicate_bag}} {:error, reason} -> {:error, reason} end end @doc """ Same as `wrap_existing/1` but unwraps or raises on error. """ @spec wrap_existing!(ETS.table_identifier()) :: Bag.t() def wrap_existing!(table_identifier), do: unwrap_or_raise(wrap_existing(table_identifier)) end
lib/ets/bag.ex
0.932361
0.858955
bag.ex
starcoder
defmodule EQC.Cluster do @copyright "Quviq AB, 2014-2016" @moduledoc """ This module contains macros to be used with [Quviq QuickCheck](http://www.quviq.com). It defines Elixir versions of the Erlang macros found in `eqc/include/eqc_cluster.hrl`. For detailed documentation of the macros, please refer to the QuickCheck documentation. `Copyright (C) Quviq AB, 2014-2016.` """ defmacro __using__(_opts) do quote do import :eqc_cluster, only: [commands: 1, commands: 2, adapt_commands: 2, state_after: 2, api_spec: 1] import :eqc_statem, only: [eq: 2, command_names: 1, more_commands: 2] import :eqc_mocking, only: [start_mocking: 2, stop_mocking: 0] import EQC.Cluster @tag eqc_callback: :eqc_cluster end end # -- Wrapper functions ------------------------------------------------------ @doc """ Same as `:eqc_cluster.run_commands/2` but returns a keyword list with `:history`, `:state`, and `:result` instead of a tuple. """ def run_commands(mod, cmds) do run_commands(mod, cmds, []) end @doc """ Same as `:eqc_cluster.run_commands/3` but returns a keyword list with `:history`, `:state`, and `:result` instead of a tuple. """ def run_commands(mod, cmds, env) do {history, state, result} = :eqc_cluster.run_commands(mod, cmds, env) [history: history, state: state, result: result] end @doc """ Same as `:eqc_component.pretty_commands/4` but takes a keyword list with `:history`, `:state`, and `:result` instead of a tuple as the third argument. """ def pretty_commands(mod, cmds, res, bool) do :eqc_component.pretty_commands(mod, cmds, {res[:history], res[:state], res[:result]}, bool) end @doc """ Generate a weight function given a keyword list of component names and weights. Usage: weight component1: weight1, component2: weight2 Components not in the list get weight 1. """ defmacro weight(cmds) do for {cmd, w} <- cmds do quote do def weight(unquote(cmd)) do unquote(w) end end end ++ [ quote do def weight(_) do 1 end end ] end end
lib/eqc/cluster.ex
0.772359
0.463141
cluster.ex
starcoder
defmodule Asteroid.Token.RefreshToken do import Asteroid.Utils alias Asteroid.Context alias Asteroid.Client alias Asteroid.Token @moduledoc """ Refresh token structure ## Field naming The `data` field holds the token data. The following field names are standard and are used by Asteroid: - `"exp"`: the expiration unix timestamp of the refresh token - `"sub"`: the `t:Asteroid.Subject.id/0` of the refresh token - `"client_id"`: the `t:Asteroid.Client.id/0` of the refresh token - `"device_id"`: the `t:Asteroid.Device.id/0` of the refresh token - `"scope"`: a list of `OAuth2Utils.Scope.scope()` scopes granted to the refresh token - `"__asteroid_oauth2_initial_flow"`: the initial `t:Asteroid.OAuth2.flow_str/0` during which the refresh token was granted - `"__asteroid_oidc_authenticated_session_id"`: the `t:Asteroid.OIDC.AuthenticatedSession.id/0` of the refresh token - `"__asteroid_oidc_claims"`: the claims that were requested, if any - `"__asteroid_oidc_initial_acr"`: the `t:Asteroid.OIDC.acr/0` of the refresh token, if any. This is the value got from the session when the token was first released - `"__asteroid_oidc_initial_amr"`: a list of `t:Asteroid.OIDC.acr/0` of the refresh token, if any. This is the value got from the session when the token was first released - `"__asteroid_oidc_initial_auth_time"`: a `non_neg_integer()` of the refresh token, if any. This is the value got from the session when the token was first released - `"status"`: a `String.t()` for the status of the token. A token that has been revoked is not necessarily still present in the token store (e.g. for stateful tokens it will be probably deleted). Optionally one of: - `"active"`: active token - `"revoked"`: revoked token """ @enforce_keys [:id, :serialization_format, :data] defstruct [:id, :data, :serialization_format] @type id :: binary() @type t :: %__MODULE__{ id: __MODULE__.id(), serialization_format: Asteroid.Token.serialization_format(), data: map() } @doc ~s""" Creates a new refresh token ## Options - `:id`: `String.t()` id, **mandatory** - `:data`: a data `map()` - `:serialization_format`: an `t:Asteroid.Token.serialization_format/0` atom, defaults to `:opaque` """ @spec new(Keyword.t()) :: t() def new(opts) do %__MODULE__{ id: opts[:id] || raise("Missing refresh token id"), data: opts[:data] || %{}, serialization_format: opts[:serialization_format] || :opaque } end @doc """ Generates a new refresh token ## Options - `:serialization_format`: an `t:Asteroid.Token.serialization_format/0` atom, defaults to `:opaque` """ @spec gen_new(Keyword.t()) :: t() def gen_new(opts \\ []) do %__MODULE__{ id: secure_random_b64(), data: %{}, serialization_format: if(opts[:format], do: opts[:format], else: :opaque) } end @doc """ Gets a refresh token from the refresh token store Unlike the `c:Asteroid.ObjectStore.RefreshToken.get/2`, this function returns `{:error, :nonexistent_refresh_token}` if the refresh token is not found in the token store. ## Options - `:check_active`: determines whether the validity of the refresh token should be checked. Defaults to `true`. For validity checking details, see `active?/1` """ @spec get(id(), Keyword.t()) :: {:ok, t()} | {:error, Exception.t()} def get(refresh_token_id, opts \\ [check_active: true]) do rt_store_module = astrenv(:object_store_refresh_token)[:module] rt_store_opts = astrenv(:object_store_refresh_token)[:opts] || [] case rt_store_module.get(refresh_token_id, rt_store_opts) do {:ok, refresh_token} when not is_nil(refresh_token) -> if opts[:check_active] != true or active?(refresh_token) do {:ok, refresh_token} else {:error, Token.InvalidTokenError.exception( sort: "refresh token", reason: "inactive token", id: refresh_token_id )} end {:ok, nil} -> {:error, Token.InvalidTokenError.exception( sort: "refresh token", reason: "not found in the token store", id: refresh_token_id )} {:error, error} -> {:error, error} end end @doc """ Stores a refresh token """ @spec store(t(), Context.t()) :: {:ok, t()} | {:error, any()} def store(refresh_token, ctx \\ %{}) do rt_store_module = astrenv(:object_store_refresh_token)[:module] rt_store_opts = astrenv(:object_store_refresh_token)[:opts] || [] refresh_token = astrenv(:object_store_refresh_token_before_store_callback).(refresh_token, ctx) case rt_store_module.put(refresh_token, rt_store_opts) do :ok -> {:ok, refresh_token} {:error, _} = error -> error end end @doc """ Deletes a refresh token """ @spec delete(t() | id()) :: :ok | {:error, any()} def delete(%__MODULE__{id: id}) do delete(id) end def delete(refresh_token_id) do rt_store_module = astrenv(:object_store_refresh_token)[:module] rt_store_opts = astrenv(:object_store_refresh_token)[:opts] || [] rt_store_module.delete(refresh_token_id, rt_store_opts) at_store_module = astrenv(:object_store_access_token)[:module] at_store_opts = astrenv(:object_store_access_token)[:opts] || [] case at_store_module.get_from_refresh_token_id(refresh_token_id, rt_store_opts) do {:ok, access_token_ids} -> for access_token_id <- access_token_ids do at_store_module.delete(access_token_id, at_store_opts) end :ok {:error, _} = error -> error end end @doc """ Puts a value into the `data` field of refresh token If the value is `nil`, the refresh token is not changed and the filed is not added. """ @spec put_value(t(), any(), any()) :: t() def put_value(refresh_token, _key, nil), do: refresh_token def put_value(refresh_token, key, val) do %{refresh_token | data: Map.put(refresh_token.data, key, val)} end @doc """ Removes a value from the `data` field of a refresh token If the value does not exist, does nothing. """ @spec delete_value(t(), any()) :: t() def delete_value(refresh_token, key) do %{refresh_token | data: Map.delete(refresh_token.data, key)} end @doc """ Serializes the refresh token, using its inner `t:Asteroid.Token.serialization_format/0` information Supports serialization to `:opaque` serialization format. """ @spec serialize(t()) :: String.t() def serialize(%__MODULE__{id: id, serialization_format: :opaque}) do id end @doc """ Returns `true` if the token is active, `false` otherwise The following data, *when set*, are used to determine that a token is active: - `"nbf"`: must be lower than current time - `"exp"`: must be higher than current time - `"revoked"`: must be the boolean `false` """ @spec active?(t()) :: boolean() def active?(refresh_token) do (is_nil(refresh_token.data["nbf"]) or refresh_token.data["nbf"] < now()) and (is_nil(refresh_token.data["exp"]) or refresh_token.data["exp"] > now()) and (is_nil(refresh_token.data["status"]) or refresh_token.data["status"] != "revoked") # FIXME: implement the following items from https://tools.ietf.org/html/rfc7662#section-4 # o If the token has been signed, the authorization server MUST # validate the signature. # o If the token can be used only at certain resource servers, the # authorization server MUST determine whether or not the token can # be used at the resource server making the introspection call. end @doc """ Returns `true` if a refresh token is to be issued, `false` otherwise ## Processing rules - If the client has the following field set to `true` for the corresponding flow and grant type, returns `true`: - `"__asteroid_oauth2_flow_ropc_issue_refresh_token_init"` - `"__asteroid_oauth2_flow_ropc_issue_refresh_token_refresh"` - `"__asteroid_oauth2_flow_client_credentials_issue_refresh_token_init"` - `"__asteroid_oauth2_flow_client_credentials_issue_refresh_token_refresh"` - `"__asteroid_oauth2_flow_authorization_code_issue_refresh_token_init"` - `"__asteroid_oauth2_flow_authorization_code_issue_refresh_token_refresh"` - `"__asteroid_oauth2_flow_device_authorization_issue_refresh_token_init"` - `"__asteroid_oauth2_flow_device_authorization_issue_refresh_token_refresh"` - `"__asteroid_oidc_flow_authorization_code_issue_refresh_token_init"` - `"__asteroid_oidc_flow_authorization_code_issue_refresh_token_refresh"` - `"__asteroid_oidc_flow_hybrid_issue_refresh_token_init"` - `"__asteroid_oidc_flow_hybrid_issue_refresh_token_refresh"` - Otherwise, if the following configuration option is set to `true` for the corresponding flow and grant type, returns `true`: - #{Asteroid.Config.link_to_option(:oauth2_flow_ropc_issue_refresh_token_init)} - #{Asteroid.Config.link_to_option(:oauth2_flow_ropc_issue_refresh_token_refresh)} - #{Asteroid.Config.link_to_option(:oauth2_flow_client_credentials_issue_refresh_token_init)} - #{ Asteroid.Config.link_to_option(:oauth2_flow_client_credentials_issue_refresh_token_refresh) } - #{Asteroid.Config.link_to_option(:oauth2_flow_authorization_code_issue_refresh_token_init)} - #{ Asteroid.Config.link_to_option(:oauth2_flow_authorization_code_issue_refresh_token_refresh) } - #{ Asteroid.Config.link_to_option(:oauth2_flow_device_authorization_issue_refresh_token_init) } - #{ Asteroid.Config.link_to_option(:oauth2_flow_device_authorization_issue_refresh_token_refresh) } - #{Asteroid.Config.link_to_option(:oidc_flow_authorization_code_issue_refresh_token_init)} - #{Asteroid.Config.link_to_option(:oidc_flow_authorization_code_issue_refresh_token_refresh)} - #{Asteroid.Config.link_to_option(:oidc_flow_hybrid_issue_refresh_token_init)} - #{Asteroid.Config.link_to_option(:oidc_flow_hybrid_issue_refresh_token_refresh)} - Otherwise, uses the following configuration options: - #{Asteroid.Config.link_to_option(:oauth2_issue_refresh_token_init)} - #{Asteroid.Config.link_to_option(:oauth2_issue_refresh_token_refresh)} - Otherwise returns `false` """ @spec issue_refresh_token?(Context.t()) :: boolean() def issue_refresh_token?(%{flow: :ropc, grant_type: :password, client: client}) do attr = "__asteroid_oauth2_flow_ropc_issue_refresh_token_init" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_ropc_issue_refresh_token_init, astrenv(:oauth2_issue_refresh_token_init, false) ) end end def issue_refresh_token?(%{flow: :ropc, grant_type: :refresh_token, client: client}) do attr = "__asteroid_oauth2_flow_ropc_issue_refresh_token_refresh" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_ropc_issue_refresh_token_refresh, astrenv(:oauth2_issue_refresh_token_refresh, false) ) end end def issue_refresh_token?(%{ flow: :client_credentials, grant_type: :client_credentials, client: client }) do attr = "__asteroid_oauth2_flow_client_credentials_issue_refresh_token_init" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_client_credentials_issue_refresh_token_init, astrenv(:oauth2_issue_refresh_token_init, false) ) end end def issue_refresh_token?(%{ flow: :client_credentials, grant_type: :refresh_token, client: client }) do attr = "__asteroid_oauth2_flow_client_credentials_issue_refresh_token_refresh" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_client_credentials_issue_refresh_token_refresh, astrenv(:oauth2_issue_refresh_token_refresh, false) ) end end def issue_refresh_token?(%{ flow: :authorization_code, grant_type: :authorization_code, client: client }) do attr = "__asteroid_oauth2_flow_authorization_code_issue_refresh_token_init" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_authorization_code_issue_refresh_token_init, astrenv(:oauth2_issue_refresh_token_init, false) ) end end def issue_refresh_token?(%{ flow: :authorization_code, grant_type: :refresh_token, client: client }) do attr = "__asteroid_oauth2_flow_authorization_code_issue_refresh_token_refresh" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_authorization_code_issue_refresh_token_refresh, astrenv(:oauth2_issue_refresh_token_refresh, false) ) end end def issue_refresh_token?(%{ flow: :device_authorization, grant_type: :"urn:ietf:params:oauth:grant-type:device_code", client: client }) do attr = "__asteroid_oauth2_flow_device_authorization_issue_refresh_token_init" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_device_authorization_issue_refresh_token_init, astrenv(:oauth2_issue_refresh_token_init, false) ) end end def issue_refresh_token?(%{ flow: :device_authorization, grant_type: :refresh_token, client: client }) do attr = "__asteroid_oauth2_flow_device_authorization_issue_refresh_token_refresh" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oauth2_flow_device_authorization_issue_refresh_token_refresh, astrenv(:oauth2_issue_refresh_token_refresh, false) ) end end def issue_refresh_token?(%{ flow: :oidc_authorization_code, grant_type: :authorization_code, client: client }) do attr = "__asteroid_oidc_flow_authorization_code_issue_refresh_token_init" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oidc_flow_authorization_code_issue_refresh_token_init, astrenv(:oauth2_issue_refresh_token_init, false) ) end end def issue_refresh_token?(%{ flow: :oidc_authorization_code, grant_type: :refresh_token, client: client }) do attr = "__asteroid_oidc_flow_authorization_code_issue_refresh_token_refresh" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oidc_flow_authorization_code_issue_refresh_token_refresh, astrenv(:oauth2_issue_refresh_token_refresh, false) ) end end def issue_refresh_token?(%{flow: :oidc_hybrid, grant_type: :authorization_code, client: client}) do attr = "__asteroid_oidc_flow_hybrid_issue_refresh_token_init" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oidc_flow_hybrid_issue_refresh_token_init, astrenv(:oauth2_issue_refresh_token_init, false) ) end end def issue_refresh_token?(%{flow: :oidc_hybrid, grant_type: :refresh_token, client: client}) do attr = "__asteroid_oidc_flow_hybrid_issue_refresh_token_refresh" client = Client.fetch_attributes(client, [attr]) if client.attrs[attr] == true do true else astrenv( :oidc_flow_authorization_code_issue_refresh_token_refresh, astrenv(:oauth2_issue_refresh_token_refresh, false) ) end end def issue_refresh_token?(_) do false end @doc """ Returns the refresh token lifetime ## Processing rules - If the client has the following field set to an integer value for the corresponding flow returns that value: - `"__asteroid_oauth2_flow_ropc_refresh_token_lifetime"` - `"__asteroid_oauth2_flow_client_credentials_refresh_token_lifetime"` - `"__asteroid_oauth2_flow_authorization_code_refresh_token_lifetime"` - `"__asteroid_oauth2_flow_device_authorization_refresh_token_lifetime"` - `"__asteroid_oidc_flow_authorization_code_refresh_token_lifetime"` - `"__asteroid_oidc_flow_hybrid_refresh_token_lifetime"` - Otherwise, if the following configuration option is set to an integer for the corresponding flow, returns its value: - #{Asteroid.Config.link_to_option(:oauth2_flow_ropc_refresh_token_lifetime)} - #{Asteroid.Config.link_to_option(:oauth2_flow_client_credentials_refresh_token_lifetime)} - #{Asteroid.Config.link_to_option(:oauth2_flow_authorization_code_refresh_token_lifetime)} - #{Asteroid.Config.link_to_option(:oauth2_flow_device_authorization_refresh_token_lifetime)} - #{Asteroid.Config.link_to_option(:oidc_flow_authorization_code_refresh_token_lifetime)} - #{Asteroid.Config.link_to_option(:oidc_flow_hybrid_refresh_token_lifetime)} - Otherwise returns the value of the #{Asteroid.Config.link_to_option(:oauth2_refresh_token_lifetime)} configuration option - Otherwise returns `0` In any case, the returned value is capped by the scope configuration. """ @spec lifetime(Context.t()) :: non_neg_integer() def lifetime(%{flow: flow, granted_scopes: granted_scopes} = ctx) do scope_config = Asteroid.OAuth2.Scope.configuration_for_flow(flow) case Asteroid.OAuth2.Scope.max_refresh_token_lifetime(granted_scopes, scope_config) do capped_lifetime when is_integer(capped_lifetime) -> min(lifetime_for_client(ctx), capped_lifetime) nil -> lifetime_for_client(ctx) end end # no scopes def lifetime(ctx) do lifetime_for_client(ctx) end @spec lifetime_for_client(Context.t()) :: non_neg_integer() defp lifetime_for_client(%{flow: flow, client: client}) do attr = case flow do :ropc -> "__asteroid_oauth2_flow_ropc_refresh_token_lifetime" :client_credentials -> "__asteroid_oauth2_flow_client_credentials_refresh_token_lifetime" :authorization_code -> "__asteroid_oauth2_flow_authorization_code_refresh_token_lifetime" :device_authorization -> "__asteroid_oauth2_flow_device_authorization_refresh_token_lifetime" :oidc_authorization_code -> "__asteroid_oidc_flow_authorization_code_refresh_token_lifetime" :oidc_hybrid -> "__asteroid_oidc_flow_hybrid_refresh_token_lifetime" end client = Client.fetch_attributes(client, [attr]) case client.attrs[attr] do lifetime when is_integer(lifetime) -> lifetime _ -> conf_opt = case flow do :ropc -> :oauth2_flow_ropc_refresh_token_lifetime :client_credentials -> :oauth2_flow_client_credentials_refresh_token_lifetime :authorization_code -> :oauth2_flow_authorization_code_refresh_token_lifetime :device_authorization -> :oauth2_flow_device_authorization_refresh_token_lifetime :oidc_authorization_code -> :oidc_flow_authorization_code_refresh_token_lifetime :oidc_hybrid -> :oidc_flow_hybrid_refresh_token_lifetime end astrenv(conf_opt, astrenv(:oauth2_refresh_token_lifetime, 0)) end end defp lifetime_for_client(_) do 0 end end
lib/asteroid/token/refresh_token.ex
0.888879
0.563438
refresh_token.ex
starcoder
defmodule Bacen.STA.Protocol do @moduledoc """ The protocol message schema for Sisbacen's server. This message is responsible to create a new protocol into Sisbacen's server to allow the application to send one of the ACCS/CCS messages for given protocol. It has the following XML examples: ```xml <Parametros> <IdentificadorDocumento>ACCS001</IdentificadorDocumento> <Hash>1235345hfdsahgdasd214312</Hash> <Tamanho>1234</Tamanho> <NomeArquivo>202105072230.xml</NomeArquivo> <Observacao /> </Parametros> ``` ```xml <Parametros> <IdentificadorDocumento>ACCS001</IdentificadorDocumento> <Hash>1235345hfdsahgdasd214312</Hash> <Tamanho>1234</Tamanho> <NomeArquivo>202105072230.xml</NomeArquivo> <Observacao>bla bla bla</Observacao> </Parametros> ``` ```xml <Parametros> <IdentificadorDocumento>ACCS001</IdentificadorDocumento> <Hash>1235345hfdsahgdasd214312</Hash> <Tamanho>1234</Tamanho> <NomeArquivo>202105072230.xml</NomeArquivo> <Observacao>bla bla bla</Observacao> <Destinatarios> <Destinatario> <Unidade>12345</Unidade> <Dependencia>dependencia 1</Dependencia> <Operador>operador 1</Operador> </Destinatario> </Destinatarios> </Parametros> ``` """ use Ecto.Schema import Ecto.Changeset @typedoc """ The Bacen's CCS protocol type """ @type t :: %__MODULE__{} @parameters_fields ~w(file_type hash file_size file_name observation)a @parameters_required_fields ~w(file_type hash file_size file_name)a @sender_fields ~w(unity dependency operator)a @primary_key false embedded_schema do embeds_one :parameters, Parameters, primary_key: false, source: :Parametros do field :file_type, :string, source: :IdentificadorDocumento field :hash, :string, source: :Hash field :file_size, :integer, source: :Tamanho field :file_name, :string, source: :NomeArquivo field :observation, :string, source: :Observacao embeds_one :senders, Senders, primary_key: false, source: :Destinatarios do embeds_many :sender, Sender, primary_key: false, source: :Destinatario do field :unity, :string, source: :Unidade field :dependency, :string, source: :Dependencia field :operator, :string, source: :Operador end end end end @doc """ Create new valid protocol xml from given attributes """ @spec new(map()) :: {:ok, t()} | {:error, Ecto.Changeset.t()} def new(attrs \\ %{}) do %__MODULE__{} |> changeset(attrs) |> apply_action(:insert) end @doc false def changeset(schema = %__MODULE__{}, attrs) when is_map(attrs) do schema |> cast(attrs, []) |> cast_embed(:parameters, with: &parameters_changeset/2, required: true) end @doc false def parameters_changeset(parameters, attrs) when is_map(attrs) do parameters |> cast(attrs, @parameters_fields) |> validate_required(@parameters_required_fields) |> validate_length(:hash, is: 64) |> cast_embed(:senders, with: &senders_changeset/2) end @doc false def senders_changeset(senders, attrs) when is_map(attrs) do senders |> cast(attrs, []) |> cast_embed(:sender, with: &sender_changeset/2) end @doc false def sender_changeset(sender, attrs) when is_map(attrs) do sender |> cast(attrs, @sender_fields) |> validate_required(@sender_fields) end end
lib/bacen/sta/protocol.ex
0.851968
0.752559
protocol.ex
starcoder
defmodule Training.DoorLock do use GenStateMachine, callback_mode: :state_functions # This allows our graphviz directed graph to be rendered # as an image in the documentation import Training.Util.Graphviz require Logger @fsm_diagram """ digraph { rankdir=LR; node [shape=doublecircle] locked_0 node [shape=circle] locked_1 locked_2 locked_3 unlocked unlocked -> locked_0 [label="re-lock timeout"] locked_0 -> locked_1 [label="correct input"] locked_0 -> locked_0 [label="incorrect input"] locked_1 -> locked_2 [label="correct input"] locked_1 -> locked_0 [label="incorrect input"] locked_1 -> locked_0 [label="input timeout"] locked_2 -> locked_3 [label="correct input"] locked_2 -> locked_0 [label="incorrect input"] locked_2 -> locked_0 [label="input timeout"] locked_3 -> unlocked [label="correct input"] locked_3 -> locked_0 [label="incorrect input"] locked_3 -> locked_0 [label="input timeout"] } """ @fsm_diagram_generic """ digraph { rankdir=LR; node [shape=doublecircle] locked [label = "locked\n{current_correct_sequence,\nfull_correct_sequence}"] node [shape=circle] unlocked unlocked -> locked [label="re-lock timeout"] locked -> locked [label="incorrect\ncurrent_correct_sequence = []"] locked -> locked [label="input timeout\ncurrent_correct_sequence = []"] locked -> locked [label="correct\ncurrent_correct_sequence ++ input"] locked -> unlocked [label="current_correct_sequence == full_correct_sequence"] } """ @moduledoc """ Represents a door lock with an automated re-lock timeout as a state machine. This door lock also has a separate timeout that requires a user to input the correct code (start-to-finish) in a certain amount of time or else it reverts to the initial locked state, clearing any in-progress input. As a diagram, the state machine for a 4-digit code is #{image(@fsm_diagram, "jpeg")} More generically, if we add and track transient input data inside each state, we have a state machine diagram that looks more like #{image(@fsm_diagram_generic, "jpeg")} """ @relock_timeout 5_000 @finish_timout 2_000 @doc """ Start a door lock with the specified correct code sequence Ignores any arguments passed in """ def start_link(secret_code) do GenStateMachine.start_link(__MODULE__, [secret_code]) end @doc """ Initializes a door_lock into the `:locked` state with a people-counter of `0` """ @spec init(any()) :: :gen_statem.init_result(GenStateMachine.state()) def init([secret_code]) do # TODO set starting state and data {:ok, :todo, %{current_code: [], secret_code: []}} end @doc """ Called while in the `:unlocked` state """ @spec unlocked( GenStateMachine.event_type(), GenStateMachine.event_content(), GenStateMachine.data() ) :: :gen_statem.event_handler_result(GenStateMachine.state()) def unlocked(:state_timeout, :lock, data) do # TODO lock when the timeout occurs end def unlocked({:timeout, :finish}, :reset, data) do Logger.error("You should cancel the finish timer") {:next_state, :locked, %{data | current_code: []}} end def unlocked(_, _, _) do :keep_state_and_data end @doc """ Called while in the `:locked` state """ @spec locked( GenStateMachine.event_type(), GenStateMachine.event_content(), GenStateMachine.data() ) :: :gen_statem.event_handler_result(GenStateMachine.state()) def locked(:cast, {:input, digit}, data) do # TODO add necessary input processing and state transition handling end def locked({:timeout, :finish}, :reset, data) do {:keep_state, %{data | current_code: []}} end def locked(_, _, _) do :keep_state_and_data end end
lib/training/door_lock.ex
0.528047
0.478285
door_lock.ex
starcoder
defmodule PeertubeIndex.StatusStorage do @moduledoc false @doc """ Create an empty status storage for testing """ @callback empty() :: :ok @doc """ Create a status storage with given statuses for testing """ @callback with_statuses([tuple()]) :: :ok @doc """ Returns the list of all statuses """ @callback all() :: [tuple()] @doc """ Find instances that have the given status and with a status updated before the given date """ @callback find_instances(:ok | :error | :discovered, NaiveDateTime.t) :: [String.t] @doc """ Find instances that have the given status """ @callback find_instances(:ok | :error | :discovered) :: [String.t] @doc """ Notify a successful instance scan at the given datetime """ @callback ok_instance(String.t, NaiveDateTime.t) :: :ok @doc """ Notify a failed instance scan, with a reason, at the given datetime """ @callback failed_instance(String.t, any(), NaiveDateTime.t) :: :ok @doc """ Notify a discovered instance at the given datetime. This will not override any previously existing status for the same instance. """ @callback discovered_instance(String.t, NaiveDateTime.t) :: :ok @doc """ Notify a banned instance, with a reason, at the given datetime """ @callback banned_instance(String.t, String.t, NaiveDateTime.t) :: :ok @doc """ Returns true if a instance (identified by it's hostname) has a status in the database, and false otherwise """ @callback has_a_status(String.t) :: boolean() end defmodule PeertubeIndex.StatusStorage.Postgresql do @moduledoc false @behaviour PeertubeIndex.StatusStorage @impl true def empty do Mix.Tasks.Ecto.Drop.run([]) Mix.Tasks.Ecto.Create.run([]) Mix.Tasks.Ecto.Migrate.run([]) :ok end @impl true def with_statuses(statuses) do for status <- statuses do case status do {host, :ok, date} -> Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, "INSERT INTO statuses (host, status, date) VALUES ('#{host}', 'ok', '#{NaiveDateTime.to_iso8601(date)}')" ) {host, {:error, reason}, date} -> Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, "INSERT INTO statuses (host, status, reason, date) VALUES ('#{host}', 'error', '#{inspect(reason)}', '#{NaiveDateTime.to_iso8601(date)}')" ) {host, :discovered, date} -> Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, "INSERT INTO statuses (host, status, date) VALUES ('#{host}', 'discovered', '#{NaiveDateTime.to_iso8601(date)}')" ) {host, {:banned, reason}, date} -> Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, "INSERT INTO statuses (host, status, reason, date) VALUES ('#{host}', 'banned', '#{reason}', '#{NaiveDateTime.to_iso8601(date)}')" ) end end :ok end @impl true def all do {:ok, result} = Ecto.Adapters.SQL.query PeertubeIndex.StatusStorage.Repo, "select host, status, reason, date from statuses" for row <- result.rows do case List.to_tuple(row) do {host, "discovered", nil, date} -> {host, :discovered, date |> NaiveDateTime.truncate(:second)} {host, "ok", nil, date} -> {host, :ok, date |> NaiveDateTime.truncate(:second)} {host, "error", reason_string, date} -> {host, {:error, reason_string}, date |> NaiveDateTime.truncate(:second)} {host, "banned", reason_string, date} -> {host, {:banned, reason_string}, date |> NaiveDateTime.truncate(:second)} end end end @impl true def find_instances(wanted_status, maximum_date) do {:ok, r} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, " SELECT host FROM statuses WHERE status = '#{wanted_status}' AND date < '#{NaiveDateTime.to_iso8601(maximum_date)}' " ) r.rows |> Enum.map(&Enum.at(&1, 0)) end @impl true def find_instances(wanted_status) do {:ok, r} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, "SELECT host FROM statuses WHERE status = '#{wanted_status}'" ) r.rows |> Enum.map(&Enum.at(&1, 0)) end @impl true def ok_instance(host, date) do {:ok, _} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, " INSERT INTO statuses (host, status, date) VALUES ($1, 'ok', $2) ON CONFLICT (host) DO UPDATE SET status = EXCLUDED.status, date = EXCLUDED.date ", [host, date] ) :ok end @impl true def failed_instance(host, reason, date) do {:ok, _} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, " INSERT INTO statuses (host, status, reason, date) VALUES ($1, 'error', $2, $3) ON CONFLICT (host) DO UPDATE SET status = EXCLUDED.status, reason = EXCLUDED.reason, date = EXCLUDED.date ", [host, inspect(reason), date] ) :ok end @impl true def discovered_instance(host, date) do {:ok, _} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, " INSERT INTO statuses (host, status, reason, date) VALUES ($1, 'discovered', null, $2) ON CONFLICT (host) DO UPDATE SET status = EXCLUDED.status, reason = EXCLUDED.reason, date = EXCLUDED.date ", [host, date] ) :ok end @impl true def banned_instance(host, reason, date) do {:ok, _} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, " INSERT INTO statuses (host, status, reason, date) VALUES ($1, 'banned', $2, $3) ON CONFLICT (host) DO UPDATE SET status = EXCLUDED.status, reason = EXCLUDED.reason, date = EXCLUDED.date ", [host, reason, date] ) :ok end @impl true def has_a_status(host) do {:ok, r} = Ecto.Adapters.SQL.query( PeertubeIndex.StatusStorage.Repo, "SELECT count(*) FROM statuses WHERE host = '#{host}'" ) count = r.rows |> Enum.at(0) |> Enum.at(0) count == 1 end end defmodule PeertubeIndex.StatusStorage.Repo do use Ecto.Repo, otp_app: :peertube_index, adapter: Ecto.Adapters.Postgres def init(_, config) do url = Confex.fetch_env!(:peertube_index, :status_storage_database_url) {:ok, Keyword.put(config, :url, url)} end end
lib/peertube_index/status_storage.ex
0.803444
0.404213
status_storage.ex
starcoder
defmodule Finance.Simple do @moduledoc """ For the simplified case series of regular payments the relationship between the present value (pv), future value (fv), payment (pmt) and rate (i) over a period of (n) time periods is given by: ``` pmt(1+i) pv(1+i)^n + ------------ + fv = 0 i(1+i)^n -1 ``` This can be solved for each parameter, pv, fv, pmt, i and n. """ @doc """ Present Value for regular fixed payments. If payments made at the end of the periods (default) type = false, else if payments are made at the beginning type = true. ## Example What is the present value (e.g., the initial investment) of an investment that needs to total £15692.93 after 10 years of saving £100 every month? Assume the interest rate is 5% (annually) compounded monthly? iex> Finance.Simple.pv(-100, 0.05/Finance.Period.monthly, 10*Finance.Period.monthly, 15692.93) |> Float.round(2) -100.00 """ def pv(pmt, i, n, fv \\ 0, type \\ false) def pv(pmt, _i = 0, n, fv, _type), do: -(pmt * n + fv) def pv(pmt, i, n, fv, type) when is_boolean(type), do: -(pmt * (1.0 + ((type && i) || 0.0)) * pvifa(i, n) + fv * pvif(i, n)) @doc """ Future Value for regular fixed payments. If payments made at the end of the periods (default) type = true, else if payments are made at the beginning type = false. ## Example What is the future value after 10 years of saving $100 now, with an additional monthly savings of $100. Assume the interest rate is 5% (annually) compounded monthly? iex> Finance.Simple.fv(-100, -100, 0.05/Finance.Period.monthly, 10*Finance.Period.monthly) |> Float.round(2) 15692.93 By convention, the negative sign represents cash flow out (i.e. money not available today). Thus, saving £100 a month at 5% annual interest leads to £15,692.93 available to spend in 10 years. """ def fv(pv, pmt, i, n, type \\ false) def fv(pv, pmt, _i = 0, n, _type), do: -(pmt * n + pv) def fv(pv, pmt, i, n, type) when is_boolean(type), do: -(pmt * (1.0 + ((type && i) || 0.0)) * fvifa(i, n) + pv * fvif(i, n)) @doc """ Payment against the loan principal plus interest If payments made at the end of the periods (default) type = true, else if payments are made at the beginning type = false. ## Examples What is the monthly payment needed to pay off a £200,000 loan in 15 years at an annual interest rate of 7.5%? iex> Finance.Simple.pmt(200000, 0.075/Finance.Period.monthly, 15*Finance.Period.monthly, 0) |> Float.round(2) -1854.02 In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained today, a monthly payment of £1,854.02 would be required. Note that this example illustrates usage of `fv` having a default value of 0. What is the future value after 10 years of saving $100 now, with an additional monthly savings of £100. Assume the interest rate is 5% (annually) compounded monthly? iex> Finance.Simple.pmt(-100, 0.05/Finance.Period.monthly, 10*Finance.Period.monthly, 15692.93) |> Float.round(2) -100.00 """ def pmt(pv, i, n, fv \\ 0, type \\ false) def pmt(pv, _i = 0, n, fv, _type), do: -(fv + pv) / n def pmt(pv, i, n, fv, type) when is_boolean(type), do: -(fv + pv * fvif(i, n)) / ((1.0 + ((type && i) || 0.0)) * fvifa(i, n)) @doc """ Number of payment periods ##Example If you only had £150/month to pay towards the loan, how long would it take to pay-off a loan of £8,000 at 7% annual interest? iex> Finance.Simple.nper(8000, -150, 0.07/Finance.Period.monthly) |> Float.round(5) 64.07335 So, just over 64 months would be required to pay off the loan. """ def nper(pv, pmt, i, fv \\ 0, type \\ false) def nper(pv, pmt, _i = 0, fv, _type), do: -(pv + fv) / pmt def nper(pv, pmt, i, fv, type) when is_boolean(type) do fx = pmt * (1.0 + ((type && i) || 0.0)) / i :math.log((-fv + fx) / (pv + fx)) / :math.log(1.0 + i) end @doc """ Rate of interest per period ##Example iex> pmt = Finance.Simple.pmt(-7500, 0.015123, 48) iex> {:ok, rate} = Finance.Simple.rate(-7500, pmt, 48) iex> Float.round(rate, 6) 0.015123 """ def rate(pv, pmt, n, fv \\ 0, type \\ false) do Finance.CashFlow.irr( List.flatten([ pv + ((type && pmt) || 0.0), List.duplicate(pmt, n - 1), ((type && 0.0) || pmt) + fv ]) ) end defp pvifa(i, n), do: (:math.pow(1.0 + i, n) - 1.0) / (i * :math.pow(1.0 + i, n)) defp pvif(i, n), do: 1.0 / :math.pow(1.0 + i, n) defp fvifa(i, n), do: (:math.pow(1.0 + i, n) - 1.0) / i defp fvif(i, n), do: :math.pow(1.0 + i, n) end
lib/finance/simple.ex
0.854278
0.918077
simple.ex
starcoder
defmodule Surgex.Parser.SortParser do @moduledoc """ Parses the JSON API's sort parameter according to the [JSON API spec](http://jsonapi.org/format/#fetching-sorting). Produces a `{direction, column}` tuple, in which `direction` is either `:asc` or `:desc` and `column` is a safely atomized and underscored column name. """ @doc false @spec call(term(), [atom]) :: {:ok, {:asc | :desc, atom} | nil} | {:error, :invalid_sort_column} def call(nil, _allowed_columns), do: {:ok, nil} def call("", _allowed_columns), do: {:ok, nil} def call(input, allowed_columns) when is_binary(input) do case input do "-" <> column -> validate_allowed_columns(column, allowed_columns, :desc) column -> validate_allowed_columns(column, allowed_columns, :asc) end end def call(_input, _), do: {:error, :invalid_sort_column} defp validate_allowed_columns(column, allowed_columns, direction) do column_atom = atomize_maybe_dasherized(column) if column_atom && column_atom in allowed_columns do {:ok, {direction, column_atom}} else {:error, :invalid_sort_column} end end defp atomize_maybe_dasherized(string) do atomize(string) || string |> String.replace("-", "_") |> atomize end defp atomize(string) do String.to_existing_atom(string) rescue ArgumentError -> nil end @doc """ Flattens the result of the parser (sort tuple) into `*_by` and `*_direction` keys. ## Examples iex> SortParser.flatten({:ok, sort: {:asc, :col}}, :sort) {:ok, sort_by: :col, sort_direction: :asc} """ @spec flatten({:ok, Keyword.t()}, atom) :: {:ok, {:asc | :desc, atom}} def flatten({:ok, opts}, key) do case Keyword.pop(opts, key) do {nil, _} -> {:ok, opts} {{direction, column}, rem_opts} -> final_opts = Keyword.merge(rem_opts, sort_by: column, sort_direction: direction) {:ok, final_opts} end end def flatten(input, _key), do: input end
lib/surgex/parser/parsers/sort_parser.ex
0.896142
0.675765
sort_parser.ex
starcoder
defmodule Cog.V1.PermissionGrantController do use Cog.Web, :controller alias Cog.Models.Permission alias Cog.Models.Role plug Cog.Plug.Authentication plug Cog.Plug.Authorization, [permission: "#{Cog.Util.Misc.embedded_bundle}:manage_roles"] when action == :manage_role_permissions plug :put_view, Cog.V1.PermissionView def manage_role_permissions(conn, params), do: manage_permissions(conn, Role, params) # Grant or revoke an arbitrary number of permissions (specified as # namespaced names) from the identified entity (i.e., the thing of # type `type` the given `id`). Returns a detail list of all # *directly*-granted permissions the thing has has following the # grant / revoke actions. defp manage_permissions(conn, type, %{"id" => id, "permissions" => permission_spec}) do result = Repo.transaction(fn() -> permittable = Repo.get!(type, id) permissions_to_grant = lookup_or_fail(permission_spec, "grant") permissions_to_revoke = lookup_or_fail(permission_spec, "revoke") permittable |> grant(permissions_to_grant) |> revoke(permissions_to_revoke) |> Repo.preload(permissions: :bundle) end) case result do {:ok, permittable} -> conn |> render("index.json", permissions: permittable.permissions) {:error, {:not_found, {"permissions", names}}} -> conn |> put_status(:unprocessable_entity) |> json(%{"errors" => %{"not_found" => %{"permissions" => names}}}) end end defp lookup_or_fail(permission_spec, operation) do names = get_in(permission_spec, [operation]) || [] case lookup_all("permissions", names) do {:ok, structs} -> structs {:error, reason} -> Repo.rollback(reason) end end defp lookup_all(_, []), do: {:ok, []} # Don't bother with a DB lookup defp lookup_all("permissions", names) do # Resolve each namespaced name to a %Permission{}. Return a list # of tuples `{name, lookup_result}` for future filtering results = Enum.map(names, fn(name) -> permission = Cog.Queries.Permission.from_full_name(name) |> Repo.one {name, permission} end) # Figure out which of those permissions actually exist in the # system. If any don't (signified by `nil`), we'll use this for an # error message. {missing, found} = Enum.partition(results, fn({_,nil}) -> true ({_,%Permission{}}) -> false end) case missing do [] -> # They're all real permissions! Get rid of the wrapping tuple; # just give back the permissions unwrapped = Enum.map(found, fn({_,p}) -> p end) {:ok, unwrapped} missing -> # Oops, you gave us permission names that don't actually # exist. Return the names in an error tuple names = Enum.map(missing, fn({n,_}) -> n end) {:error, {:not_found, {"permissions", names}}} end end defp grant(user, permissions) do Enum.each(permissions, &Permittable.grant_to(user, &1)) user end defp revoke(user, permissions) do Enum.each(permissions, &Permittable.revoke_from(user, &1)) user end end
web/controllers/v1/permission_grant_controller.ex
0.760028
0.428951
permission_grant_controller.ex
starcoder
defmodule PlymioAstVormHelper do alias Plymio.Ast.Vorm, as: PAV use Plymio.Ast.Vorm.Attribute use PlymioAstEvalAttributeHelper import PlymioAstEvalHelper, only: [ helper_ast_eval: 2, helper_ast_eval_normalise_error: 1, helper_ast_eval_compare: 3, helper_ast_eval_opts_canon_keys!: 1, ] @helper_vorm_produce_opts_keys_eval [ @helper_opts_key_binding, @helper_opts_key_expect_value, @helper_opts_key_expect_text, @helper_opts_key_expect_texts, @helper_opts_key_expect_form, ] def helper_vorm_produce(vorm, opts) def helper_vorm_produce(%PAV{}= vorm, opts) when is_list(opts) do opts = opts |> helper_ast_eval_opts_canon_keys! eval_opts = opts |> Keyword.take(@helper_vorm_produce_opts_keys_eval) with {:ok, {forms, %PAV{}}} <- vorm |> PAV.produce do forms |> helper_ast_eval(eval_opts) else {:error, _} = result -> result end end def helper_vorm_produce_pipeline(pipeline, opts \\ []) def helper_vorm_produce_pipeline(pipeline, opts) when is_list(opts) do opts = opts |> helper_ast_eval_opts_canon_keys! eval_opts = opts |> Keyword.take(@helper_vorm_produce_opts_keys_eval) with {:ok, vorm} <- PAV.new(), {:ok, {forms, %PAV{}}} <- pipeline |> PAV.produce_pipeline(vorm) do forms |> helper_ast_eval(eval_opts) else {:error, _} = result -> result end end def helper_vorm_produce_pipeline!(pipeline, opts \\ []) def helper_vorm_produce_pipeline!(pipeline, opts) when is_list(opts) do opts = opts |> helper_ast_eval_opts_canon_keys! pipeline |> helper_vorm_produce_pipeline(opts) |> case do {:ok, {_result, _text, _ast} = result} -> result {:error, error} = result-> case opts |> Keyword.has_key?(@helper_opts_key_expect_error) do true -> error |> helper_ast_eval_normalise_error |> helper_ast_eval_compare(@helper_opts_key_expect_error, opts) |> case do # expected error matches {:ok, actual_error} -> {:ok, {actual_error, nil, nil}} _ -> result end # no error to compare with _ -> result end |> case do # expected error matches {:ok, _} = result -> result {:error, error} -> case error |> Exception.exception? do true -> raise error end end end end def helper_vorm_show_forms(vorm, opts \\ []) def helper_vorm_show_forms(%PAV{} = vorm, _opts) do with {:ok, {forms, %PAV{}}} <- vorm |> PAV.express do {:ok, forms |> Enum.map(&Macro.to_string/1)} else {:error, _} = result -> result end end def helper_vorm_show_forms!(vorm, opts \\ []) do with {:ok, forms} <- vorm |> helper_vorm_show_forms(opts) do forms else {:error, error} -> raise error end end def helper_vorm_test_forms!(vorm, opts \\ []) def helper_vorm_test_forms!(%PAV{} = vorm, opts) do vorm |> helper_vorm_produce(opts) |> case do {:ok, {result, texts, _forms}} -> {result, texts} {:error, error} -> raise error end end end
test/helper/vorm.ex
0.675122
0.477311
vorm.ex
starcoder
defmodule Hub do @moduledoc """ Pub-sub hub Subscription is done with a pattern. Example: ``` Hub.subscribe("global", %{count: count} when count > 42) Hub.publish("global", %{count: 45, message: "You rock!"}) ``` """ alias Hub.Channel alias Hub.ChannelRegistry alias Hub.ChannelSupervisor alias Hub.Subscriber @doc """ Unsubscribes using the reference returned on subscribe. """ defdelegate unsubscribe(ref), to: Channel defdelegate unsubscribe_and_flush(ref), to: Channel @doc """ Convenience macro for subscribing without the need to unquote the pattern. Example: ``` Hub.subscribe("global", %{count: count} when count > 42) ``` """ defmacro subscribe(channel_name, pattern, options \\ []) do quote do {bind_quoted, options} = unquote(options) |> Keyword.pop(:bind_quoted, []) quoted_pattern = unquote(Macro.escape(pattern)) |> Hub.replace_pins(bind_quoted) Hub.subscribe_quoted(unquote(channel_name), quoted_pattern, options) end end @doc """ Publishes the term to all subscribers that matches it. Returns the number of subscribers that got the message. """ @spec publish(String.t(), any) :: non_neg_integer def publish(channel_name, term) do case lookup_channel(channel_name) do {:ok, channel} -> Channel.publish(channel, term) :not_found -> 0 end end @doc """ Subscribes to the quoted pattern in the given channel_name. Example: ``` Hub.subscribe("global", quote do: %{count: count} when count > 42) ``` """ @spec subscribe_quoted(String.t(), any, Channel.subscribe_options()) :: {:ok, Channel.subscription_ref()} | {:error, reason :: String.t()} def subscribe_quoted(channel_name, quoted_pattern, options \\ []) do channel = upsert_channel(channel_name) Channel.subscribe_quoted(channel, quoted_pattern, options) end @doc """ Get all subscribers from channel. """ @spec subscribers(String.t()) :: [Subscriber.t()] def subscribers(channel_name) do case lookup_channel(channel_name) do {:ok, channel} -> Channel.subscribers(channel) :not_found -> [] end end @doc false def replace_pins(ast, [] = _binding) do ast end def replace_pins(ast, bindings) do {ast, _acc} = Macro.traverse( ast, nil, fn ast, _acc -> ast = traverse_pin(ast, bindings) {ast, nil} end, fn ast, _acc -> {ast, nil} end ) ast end defp traverse_pin({:^, _, [{name, _, atom}]} = term, bindings) when is_atom(atom) do case Keyword.fetch(bindings, name) do {:ok, value} -> Macro.escape(value) :error -> term end end defp traverse_pin(ast, _bindings) do ast end defp upsert_channel(channel_name) do case lookup_channel(channel_name) do {:ok, channel} -> channel :not_found -> case ChannelSupervisor.start_child(channel_name) do {:ok, channel} -> channel :ignore -> # Handle race condition where two processes are creating a channel at the same time {:ok, channel} = lookup_channel(channel_name) channel end end end defp lookup_channel(channel_name) do ChannelRegistry.lookup(channel_name) end end
lib/hub.ex
0.89174
0.856332
hub.ex
starcoder
defmodule Reactivity.Quality.Context do @moduledoc false alias Observables.Obs require Logger def combine(_, nil), do: nil @doc """ combines a list of contexts in the case of enforcing time synchronization Takes a mixed list of tuples {oldest_timestamp, newest_timestamp} and timestamps ti Returns a tuple containing the oldest, respectively the most recent timestamp in the list. [{tl1,th1}, t2, t3, {t4l, t4h}, ... , {tln,thn}] -> {tl_min, tl_max} """ def combine(contexts, {:t, _}), do: combine(contexts, :t) def combine(contexts, :t) do lows = contexts |> Stream.map(fn {low, _high} -> low time -> time end) highs = contexts |> Stream.map(fn {_low, high} -> high time -> time end) {low, high} = {Enum.min(lows), Enum.max(highs)} if (low == high), do: low, else: {low, high} end @doc """ combines a list of contexts in the case of enforcing glitch freedom [[{s11,{c1low, c1high}},...,{s1n, c1n}], ... , [{sm1,cm1},...,{smn,cmn}]]-> [{sa, ca}, {sb,cb}, ...] Joins the list of contexts into one context of tuples {sender, counter} and removes duplicate tuples """ def combine(contexts, {:g, _}), do: combine(contexts, :g) def combine(contexts, :g) do contexts |> List.flatten |> Enum.group_by(&(elem(&1,0))) |> Map.values |> Enum.map(fn [h | []] -> h slst -> lows = slst |> Stream.map(fn {_, {low, _}} -> low {_, counter} -> counter end) highs = slst |> Stream.map(fn {_, {_, high}} -> high {_, counter} -> counter end) s = slst |> List.first |> elem(0) {low, high} = {Enum.min(lows), Enum.max(highs)} c = if (low == high), do: low, else: {low, high} {s, c} end) end @doc """ Decides whether a given context is acceptable under given consistency guarantee. """ def sufficient_quality?(_, nil), do: true def sufficient_quality?(context, {cgt, cgm}) do penalty(context, cgt) <= cgm end @doc """ Calculates the penalty of a context under the given guarantee """ defp penalty(context, {:g, _}), do: penalty(context, :g) defp penalty(context, :g) do context |> Stream.map(fn {_s, {low, high}} -> high-low {_s, _counter} -> 0 end) |> Enum.max end defp penalty(context, {:t, _}), do: penalty(context, :t) defp penalty({low, high}, :t), do: high-low defp penalty(_time, :t), do: 0 @doc """ Creates an observable carrying the contexts for the given guarantee at the rate of the given observalbe. """ def new_context_obs(obs, nil) do Obs.count(obs, 0) |> Obs.map(fn _ -> nil end) end def new_context_obs(obs, {:g, _}), do: new_context_obs(obs, :g) def new_context_obs(obs, :g) do {_f, pid} = obs Obs.count(obs, 0) |> Obs.map(fn n -> [{{node(), pid}, n-1}] end) end def new_context_obs(obs, {:t, _}), do: new_context_obs(obs, :t) def new_context_obs(obs, :t) do Obs.count(obs, 0) |> Obs.map(fn n -> n-1 end) end end
lib/reactivity/quality/context.ex
0.612078
0.559049
context.ex
starcoder
defmodule Wasmex.Instance do @moduledoc """ Instantiates a WebAssembly module represented by bytes and allows calling exported functions on it. ```elixir # Get the Wasm module as bytes. {:ok, bytes } = File.read("wasmex_test.wasm") # Instantiates the Wasm module. {:ok, instance } = Wasmex.Instance.from_bytes(bytes) # Test for existence of a function true = Wasmex.Instance.function_export_exists(instance, "sum") ``` All exported functions are accessible via `call_exported_function`. Arguments of these functions are automatically casted to WebAssembly values. Note that WebAssembly only knows number datatypes (floats and integers of various sizes). You can pass arbitrary data to WebAssembly, though, by writing this data into its memory. The `memory` function returns a `Memory` struct representing the memory of that particular instance, e.g.: ```elixir {:ok, memory} = Wasmex.Instance.memory(instance, :uint8, 0) ``` This module, especially `call_exported_function` is assumed to be called within a GenServer context. """ @type t :: %__MODULE__{ resource: binary(), reference: reference() } defstruct resource: nil, # The actual NIF instance resource. # Normally the compiler will happily do stuff like inlining the # resource in attributes. This will convert the resource into an # empty binary with no warning. This will make that harder to # accidentally do. # It also serves as a handy way to tell file handles apart. reference: nil @spec from_bytes(binary(), %{optional(binary()) => (... -> any())}) :: {:error, binary()} | {:ok, __MODULE__.t()} def from_bytes(bytes, imports) when is_binary(bytes) and is_map(imports) do case Wasmex.Native.instance_new_from_bytes(bytes, imports) do {:ok, resource} -> {:ok, wrap_resource(resource)} {:error, err} -> {:error, err} end end defp wrap_resource(resource) do %__MODULE__{ resource: resource, reference: make_ref() } end @spec function_export_exists(__MODULE__.t(), binary()) :: boolean() def function_export_exists(%__MODULE__{resource: resource}, name) when is_binary(name) do Wasmex.Native.instance_function_export_exists(resource, name) end @doc """ Calls a function with the given `name` and `params` on the WebAssembly `instance`. This function assumes to be called within a GenServer context, it expects a `from` argument as given by `handle_call` etc. The WebAssembly function will be invoked asynchronously in a new OS thread. The calling process will receive a `{:returned_function_call, result, from}` message once the execution finished. The result either is an `{:error, reason}` or `{:ok, results}` tuple with `results` containing a list of the results form the called WebAssembly function. Calling `call_exported_function` usually returns an `:ok` atom but may throw a BadArg exception when given unexpected input data. """ @spec call_exported_function(__MODULE__.t(), binary(), [any()], GenServer.from()) :: any() def call_exported_function(%__MODULE__{resource: resource}, name, params, from) when is_binary(name) do Wasmex.Native.instance_call_exported_function(resource, name, params, from) end @spec memory(__MODULE__.t(), atom(), pos_integer()) :: {:error, binary()} | {:ok, Wasmex.Memory.t()} def memory(%__MODULE__{} = instance, size, offset) when size in [:uint8, :int8, :uint16, :int16, :uint32, :int32] do Wasmex.Memory.from_instance(instance, size, offset) end end defimpl Inspect, for: Wasmex.Instance do import Inspect.Algebra def inspect(dict, opts) do concat(["#Wasmex.Instance<", to_doc(dict.reference, opts), ">"]) end end
lib/wasmex/instance.ex
0.897256
0.886076
instance.ex
starcoder
defmodule Sippet.Transports do @moduledoc """ The `Sippet.Transports` is responsible for the actual transmission of requests and responses over network transports. Network transport protocols are implemented following the `Sippet.Transports.Plug` behavior, and they are configured as: config :sippet, Sippet.Transports, udp: Sippet.Transports.UDP.Plug Whenever a message is received by a plug, the `Sippet.Transports.Queue` is used to process, validate and route it through the transaction layer or core. """ import Supervisor.Spec alias Sippet.Message, as: Message alias Sippet.Message.RequestLine, as: RequestLine alias Sippet.Message.StatusLine, as: StatusLine alias Sippet.Transports.Pool, as: Pool alias Sippet.URI, as: URI @doc """ Starts the transport process hierarchy. """ @spec start_link() :: Supervisor.on_start def start_link() do children = [ Pool.spec() | plugs_specs() ] options = [ strategy: :one_for_one, name: __MODULE__ ] Supervisor.start_link(children, options) end defp plugs_specs() do :sippet |> Application.get_env(__MODULE__, []) |> plugs_specs([]) end defp plugs_specs([], result), do: result defp plugs_specs([{_protocol, module} | rest], result), do: plugs_specs(rest, [worker(module, []) | result]) @doc """ Sends a message to the network. If specified, the `transaction` will receive the transport error if occurs. See `Sippet.Transactions.receive_error/2`. This function may block the caller temporarily due to resource constraints. """ @spec send_message(Message.t, GenServer.server | nil) :: :ok def send_message(message, transaction \\ nil) do {protocol, host, port} = get_destination(message) plug = protocol |> to_plug() apply(plug, :send_message, [message, host, port, transaction]) end defp get_destination(%Message{target: target}) when is_tuple(target) do target end defp get_destination(%Message{start_line: %StatusLine{}, headers: %{via: via}} = message) do {_version, protocol, {host, port}, params} = hd(via) {host, port} = if Message.response?(message) do host = case params do %{"received" => received} -> received _otherwise -> host end port = case params do %{"rport" => ""} -> port %{"rport" => rport} -> rport |> String.to_integer() _otherwise -> port end {host, port} else {host, port} end {protocol, host, port} end defp get_destination(%Message{start_line: %RequestLine{request_uri: uri}} = request) do host = uri.host port = uri.port params = if uri.parameters == nil do %{} else URI.decode_parameters(uri.parameters) end protocol = if params |> Map.has_key?("transport") do Sippet.Message.to_protocol(params["transport"]) else {_version, protocol, _sent_by, _params} = hd(request.headers.via) protocol end {protocol, host, port} end defp to_plug(protocol) do :sippet |> Application.get_env(Sippet.Transports) |> Keyword.fetch!(protocol) end @doc """ Verifies if the transport protocol used to send the given message is reliable. """ @spec reliable?(Message.t) :: boolean def reliable?(%Message{headers: %{via: via}}) do {_version, protocol, _host_and_port, _params} = hd(via) plug = protocol |> to_plug() apply(plug, :reliable?, []) end end
lib/sippet/transports.ex
0.829871
0.47457
transports.ex
starcoder
defmodule Hornet.Scheduler do @moduledoc false use GenServer require Logger alias Hornet.DynamicSupervisor, as: HornetDynamicSupervisor alias Hornet.RateCounter alias Hornet.ParamsValidator alias Hornet.Worker.WorkerSupervisor @spec start_link(Keyword.t()) :: GenServer.on_start() def start_link(params) do clean_params = ParamsValidator.validate!(params) GenServer.start_link(__MODULE__, clean_params, name: Keyword.fetch!(params, :id)) end @spec state(atom() | pid()) :: map() def state(name) do GenServer.call(name, :state) end @spec stop(atom() | pid()) :: :ok def stop(name) do send(name, :stop) :ok end @impl true def init(params) do rate_period = params[:rate_period] {:ok, supervisor} = HornetDynamicSupervisor.start_link() {:ok, rate_counter} = DynamicSupervisor.start_child(supervisor, %{ id: RateCounter, start: {RateCounter, :start_link, [[interval: rate_period]]} }) rate = Keyword.fetch!(params, :rate) id = Keyword.fetch!(params, :id) func = Keyword.fetch!(params, :func) worker_params = [rate: rate, id: id, func: func] period = params[:start_period] period_step = params[:adjust_step] adjust_period = params[:adjust_period] error_rate = params[:error_rate] process_number_limit = params[:process_number_limit] log_period = params[:log_period] {pid, workers_count} = start_workers(supervisor, worker_params, rate_counter, period) {:ok, adjustment_timer} = :timer.send_interval(adjust_period, :adjust_workers) {:ok, log_timer} = if log_period > 0 do :timer.send_interval(log_period, :log_rates) else {:ok, nil} end state = %{ rate_counter: rate_counter, worker_supervisor: pid, supervisor: supervisor, current_workers_count: workers_count, period: period, period_step: period_step, adjust_period: adjust_period, error_rate: error_rate, params: worker_params, process_number_limit: process_number_limit, adjustment_timer: adjustment_timer, log_period: log_period, log_timer: log_timer } {:ok, state} end @impl true def handle_info(:adjust_workers, state) do cond do correct_rate?(state) -> {:noreply, state} process_number_limit?(state) -> {:noreply, state} true -> adjust_workers(state) end end @impl true def handle_info(:log_rates, state) do current_rate = state.rate_counter |> RateCounter.rate() |> round() expected_rate = state.params[:rate] error_rate = Float.floor(expected_rate * state.error_rate) Logger.info( "[Hornet] Current rate: #{current_rate} | Expected rate: #{expected_rate} | Allowed error rate: #{ error_rate }" ) {:noreply, state} end @impl true def handle_info(:stop, state) do :ok = DynamicSupervisor.stop(state.supervisor) {:stop, :normal, state} end @impl true def handle_call(:state, _from, state) do {:reply, state, state} end defp adjust_workers(state) do :ok = DynamicSupervisor.terminate_child(state.supervisor, state.worker_supervisor) new_period = state.period + state.period_step {pid, workers_count} = start_workers(state.supervisor, state.params, state.rate_counter, new_period) new_state = %{ state | worker_supervisor: pid, current_workers_count: workers_count, period: new_period } {:noreply, new_state} end defp process_number_limit?(state) do {_, new_number} = calculate_workers_number(state.params[:rate], state.period) state[:process_number_limit] && state[:process_number_limit] <= new_number end defp correct_rate?(state) do current_rate = RateCounter.rate(state.rate_counter) expected_rate = state.params[:rate] error_rate = expected_rate * state.error_rate if current_rate > expected_rate do current_rate - expected_rate < error_rate else expected_rate - current_rate < error_rate end end defp start_workers(supervisor, params, rate_counter, period) do rate = Keyword.fetch!(params, :rate) id = Keyword.fetch!(params, :id) func = Keyword.fetch!(params, :func) {interval, initial_workers_number} = calculate_workers_number(rate, period) params = [ rate: rate, id: id, func: func, rate_counter: rate_counter, workers_number: initial_workers_number, interval: interval ] {:ok, pid} = DynamicSupervisor.start_child(supervisor, %{ id: :worker_supervisor, start: {WorkerSupervisor, :start_link, [params]}, type: :supervisor }) {pid, initial_workers_number} end defp calculate_workers_number(rate, period) do tps = 1_000 / period if rate / tps <= 1 do period = round(1000 / rate) {period, 1} else workers = round(rate / tps) {period, workers} end end end
lib/hornet/scheduler.ex
0.831656
0.438545
scheduler.ex
starcoder
defmodule CredoContrib.Check.FunctionBlockSyntax do @moduledoc """ `def …, do:` syntax should not be mixed with multiple `def … do … end`-style definitions. https://github.com/christopheradams/elixir_style_guide#multiple-function-defs """ @explanation [ check: @moduledoc, params: [ allow_single_kw_defs: "Set to `false` to only allow `def …, do:` syntax for functions with multiple heads" ] ] @default_params [allow_single_kw_defs: true] use Credo.Check, base_priority: :high, category: :readability def run(source_file, params \\ []) do issue_meta = IssueMeta.for(source_file, params) allow_single_kw_defs? = Params.get(params, :allow_single_kw_defs, @default_params) source_file |> Credo.Code.to_tokens() |> collect_definitions(%{}) |> Enum.reduce([], fn {{_, name}, %{long: long, short: _, line_no: line_no}}, issues when long > 1 -> new_issue = issue_for(:mixed_defs, issue_meta, line_no, name) [new_issue | issues] {_, %{long: _}}, issues -> issues {{_, name}, %{short: 1, line_no: line_no}}, issues -> if allow_single_kw_defs? do issues else new_issue = issue_for(:single_kw_def, issue_meta, line_no, name) [new_issue | issues] end _, issues -> issues end) end defp collect_definitions([], acc) do acc end defp collect_definitions( [ {:identifier, _, def_call}, {name_identifier, {line_no, _, _}, name} | rest ], acc ) when def_call in [:def, :defp] and name_identifier in [:identifier, :paren_identifier] do [block_start | rest] = Enum.drop_while(rest, fn {:do, _} -> false {:kw_identifier, _, :do} -> false _ -> true end) acc = case block_start do {:do, _} -> count_definition(acc, {def_call, name}, line_no, :long) {:kw_identifier, _, :do} -> count_definition(acc, {def_call, name}, line_no, :short) end collect_definitions(rest, acc) end defp collect_definitions([_ | rest], acc) do collect_definitions(rest, acc) end defp count_definition(acc, name, line_no, type) do case Map.fetch(acc, name) do :error -> Map.put(acc, name, %{type => 1, line_no: line_no}) {:ok, %{^type => count} = map} -> Map.put(acc, name, %{map | type => count + 1}) {:ok, map} -> Map.put(acc, name, Map.put(map, type, 1)) end end defp issue_for(:mixed_defs, issue_meta, line_no, trigger) do format_issue( issue_meta, message: "`def …, do:` syntax should not be mixed with multiple `def … do … end`-style definitions", line_no: line_no, trigger: trigger ) end defp issue_for(:single_kw_def, issue_meta, line_no, trigger) do format_issue( issue_meta, message: "`def …, do:` syntax should only be used for functions with multiple heads", line_no: line_no, trigger: trigger ) end end
lib/check/function_block_syntax.ex
0.688468
0.468061
function_block_syntax.ex
starcoder
defmodule TextBasedFPS.GameMap.Matrix do alias TextBasedFPS.{Direction, GameMap} alias TextBasedFPS.GameMap.{Coordinates, Objects} @type t :: list(list(item_t)) @type item_t :: :" " | :"#" | GameMap.Object.t() @spec set(t, Coordinates.t(), item_t) :: t def set(matrix, {x, y}, value) do updated_row = Enum.at(matrix, y) |> List.replace_at(x, value) List.replace_at(matrix, y, updated_row) end @spec clear(t, Coordinates.t()) :: t def clear(matrix, {x, y}) do set(matrix, {x, y}, :" ") end @spec has?(t, Coordinates.t()) :: boolean def has?(matrix, {x, y}) do at(matrix, {x, y}) != nil end @spec wall_at?(t, Coordinates.t()) :: boolean def wall_at?(matrix, {x, y}) do at(matrix, {x, y}) == :"#" end @spec object_at(t, Coordinates.t()) :: GameMap.Object.t() | nil def object_at(matrix, {x, y}) do object = at(matrix, {x, y}) if Objects.object?(object), do: object, else: nil end @spec object_at?(t, Coordinates.t()) :: boolean def object_at?(matrix, {x, y}) do object_at(matrix, {x, y}) != nil end @spec player_at(t, Coordinates.t()) :: Objects.Player.t() | nil def player_at(matrix, {x, y}) do player = at(matrix, {x, y}) case player do %TextBasedFPS.GameMap.Objects.Player{} -> player _ -> nil end end @spec player_at(t, Coordinates.t(), Player.key_t()) :: Objects.Player.t() | nil def player_at(matrix, {x, y}, player_key) do player = player_at(matrix, {x, y}) if player && player.player_key == player_key, do: player, else: nil end @spec player_at?(t, Coordinates.t()) :: boolean def player_at?(matrix, {x, y}) do player_at(matrix, {x, y}) != nil end @spec player_at?(t, Coordinates.t(), Player.key_t()) :: boolean def player_at?(matrix, {x, y}, player_key) do player_at(matrix, {x, y}, player_key) != nil end @spec at(t, Coordinates.t()) :: item_t | nil def at(matrix, {x, y}) when x >= 0 and y >= 0 do row = Enum.at(matrix, y) get_col(row, x) end def at(_matrix, {_, _}), do: nil defp get_col(nil, _x), do: nil defp get_col(row, x), do: Enum.at(row, x) @spec clean(t) :: t def clean(matrix) do Enum.map(matrix, fn line -> Enum.map(line, &clean_position/1) end) end defp clean_position(:"#"), do: :"#" defp clean_position(_), do: :" " @doc """ Iterate on the map matrix from a coordinate towards a given direction until the end of the map. For each iteration, it'll call `fun`, which should return {:continue, acc} if it should proceed, or {:stop, acc} The final accumulated value will be returned. """ @spec iterate_towards(t, Coordinates.t(), Direction.t(), any, function) :: any def iterate_towards(matrix, {x, y}, direction, acc, fun) do result = handle_iteration_fun_call(matrix, {x, y}, direction, acc, fun) case result do {:continue, updated_acc, next_coordinate} -> iterate_towards(matrix, next_coordinate, direction, updated_acc, fun) {:stop, updated_acc, _} -> updated_acc end end defp handle_iteration_fun_call(matrix, {x, y}, direction, acc, fun) do next_coordinate = Direction.calculate_movement(direction, {x, y}) if has?(matrix, next_coordinate) do {action, updated_acc} = fun.(next_coordinate, acc) {action, updated_acc, next_coordinate} else {:stop, acc, next_coordinate} end end end
lib/text_based_fps/game_map/matrix.ex
0.847416
0.801703
matrix.ex
starcoder
defmodule Scribe do @moduledoc """ Pretty-print tables of structs and maps """ alias Scribe.Table @type data :: [] | [...] | term @typedoc ~S""" Options for configuring table output. - `:colorize` - When `false`, disables colored output. Defaults to `true` - `:data` - Defines table headers - `:device` - Where to print (defaults to STDOUT) - `:style` - Style callback module. Defaults to `Scribe.Style.Default` - `:width` - Defines table width. Defaults to `:infinite` """ @type format_opts :: [ colorize: boolean, data: [...], style: module, width: integer ] @doc ~S""" Enables/disables auto-inspect override. If true, Scribe will override `inspect/2` for maps and structs, printing them as tables. ## Examples iex> Scribe.auto_inspect(true) :ok """ @spec auto_inspect(boolean) :: :ok def auto_inspect(inspect?) do Application.put_env(:scribe, :auto_inspect, inspect?) end @doc ~S""" Returns true if Scribe is overriding `Inspect`. ## Examples iex> Scribe.auto_inspect? true """ def auto_inspect? do compile_auto_inspect?() and Application.get_env(:scribe, :auto_inspect, false) end @doc false def compile_auto_inspect? do Application.get_env(:scribe, :compile_auto_inspect, false) end @doc ~S""" Prints a table from given data. ## Examples iex> print([]) :ok iex> Scribe.print(%{key: :value, test: 1234}, colorize: false) +----------+---------+ | :key | :test | +----------+---------+ | :value | 1234 | +----------+---------+ :ok """ @spec print(data, format_opts) :: :ok def print(_results, opts \\ []) def print([], _opts), do: :ok def print(results, opts) do dev = opts |> Keyword.get(:device, :stdio) results = results |> format(opts) dev |> IO.puts(results) end def console(results, opts \\ []) do results |> format(opts) |> Pane.console() end @doc ~S""" Prints a table from given data and returns the data. Useful for inspecting pipe chains. ## Examples iex> Scribe.inspect([]) [] iex> Scribe.inspect(%{key: :value, test: 1234}, colorize: false) +----------+---------+ | :key | :test | +----------+---------+ | :value | 1234 | +----------+---------+ %{test: 1234, key: :value} """ @spec inspect(term, format_opts) :: term def inspect(results, opts \\ []) do print(results, opts) results end @doc ~S""" Formats data into a printable table string. ## Examples iex> format([]) :ok iex> format(%{test: 1234}, colorize: false) "+---------+\n| :test |\n+---------+\n| 1234 |\n+---------+\n" """ @spec format([] | [...] | term) :: String.t() | :ok def format(_results, opts \\ []) def format([], _opts), do: :ok def format(results, opts) when not is_list(results) do format([results], opts) end def format(results, opts) do keys = fetch_keys(results, opts[:data]) headers = map_string_values(keys) data = Enum.map(results, &map_string_values(&1, keys)) table = [headers | data] Table.format(table, Enum.count(table), Enum.count(keys), opts) end defp map_string_values(keys), do: Enum.map(keys, &string_value(&1)) defp map_string_values(row, keys), do: Enum.map(keys, &string_value(row, &1)) defp string_value(%{name: name, key: _key}) do name end defp string_value(map, %{name: _name, key: key}) when is_function(key) do map |> key.() end defp string_value(map, %{name: _name, key: key}) do map |> Map.get(key) end defp fetch_keys([first | _rest], nil), do: fetch_keys(first) defp fetch_keys(_list, opts), do: process_headers(opts) defp process_headers(opts) do for opt <- opts do case opt do {name, key} -> %{name: name, key: key} key -> %{name: key, key: key} end end end defp fetch_keys(map) do map |> Map.keys() |> process_headers() end end
lib/scribe.ex
0.845321
0.600364
scribe.ex
starcoder
defmodule Moeda do @moduledoc """ [![Build Status](https://travis-ci.org/ramondelemos/ex_dinheiro.svg?branch=master)](https://travis-ci.org/ramondelemos/ex_dinheiro?branch=master) [![Coverage Status](https://coveralls.io/repos/github/ramondelemos/ex_dinheiro/badge.svg?branch=master)](https://coveralls.io/github/ramondelemos/ex_dinheiro?branch=master) """ alias Moeda.Moedas defstruct [:name, :symbol, :alpha_code, :num_code, :exponent] @typedoc """ Type that represents Moeda struct with: :name as String.t that represents the name of the currency. :symbol as String.t that represents symbol of the currency. :alpha_code as String.t that represents the alphabetic ISO 4217 code. :num_code as integer that represents the numeric ISO 4217 code. Where possible the 3 digit numeric code is the same as the numeric country code. :exponent as integer that represents the exponent of the currency. """ @type t :: %__MODULE__{ name: String.t(), symbol: String.t(), alpha_code: String.t(), num_code: integer, exponent: integer } @spec find!(String.t() | atom) :: t @doc """ Return a map from an atom or string that represents an ISO 4217 code. ## Examples iex> Moeda.find!(:BRL) %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2} iex> Moeda.find!(986) %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2} iex> Moeda.find!(:NONE) ** (ArgumentError) 'NONE' does not represent an ISO 4217 code """ def find!(num_code) when is_integer(num_code) do num_code |> do_find end def find!(alpha_code) when is_atom(alpha_code) do alpha_code |> Atom.to_string() |> String.upcase() |> String.to_atom() |> do_find end def find!(alpha_code) when is_binary(alpha_code) do alpha_code |> String.upcase() |> String.to_atom() |> do_find end defp do_find(alpha_code) when is_atom(alpha_code) do unofficial_currencies = get_unofficial_currencies() currencies = Moedas.get_currencies() alpha_code |> do_find(unofficial_currencies, currencies) end defp do_find(num_code) when is_integer(num_code) do unofficial_currencies = get_unofficial_currencies() |> Enum.map(fn {_key, currency} -> {currency.num_code, currency} end) |> Map.new() currencies = Moedas.get_currencies_by_num_code() num_code |> do_find(unofficial_currencies, currencies) end defp do_find(code, unofficial_currencies, currencies) do currency = unofficial_currencies[code] result = if currency do currency else currencies |> Map.get(code) end unless result, do: raise( ArgumentError, message: "'#{code}' does not represent an ISO 4217 code" ) raise_if_is_not_moeda(result, code) result end defp get_unofficial_currencies do :ex_dinheiro |> Application.get_env(:unofficial_currencies, %{}) |> Enum.map(fn {key, currency} -> {key, currency} end) |> Enum.filter(fn {key, currency} -> with true <- is_atom(key), {true, _} <- is_moeda(currency) do true else _ -> false end end) |> Map.new() end defp is_moeda( %__MODULE__{ name: n, symbol: s, alpha_code: i, num_code: c, exponent: e } = m ) when is_binary(n) and is_list(s) and is_binary(i) and is_integer(c) and is_integer(e), do: {true, m} defp is_moeda(value), do: {false, value} defp raise_if_is_not_moeda(value, alpha_code) do case is_moeda(value) do {true, _} -> true {false, _} -> raise( ArgumentError, message: ":#{alpha_code} must to be associated to a Moeda struct" ) end end @spec find(String.t() | atom) :: {:ok, t} | {:error, String.t()} @doc """ Return a map from an atom or string that represents an ISO 4217 code. ## Examples iex> Moeda.find(:BRL) {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} iex> Moeda.find("BRL") {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} iex> Moeda.find(986) {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} iex> Moeda.find("NONE") {:error, "'NONE' does not represent an ISO 4217 code"} Its function ignore case sensitive. ## Examples iex> Moeda.find(:brl) {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} iex> Moeda.find("brl") {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} Is possible to work with no official ISO currency code adding it in the system Mix config. ## Examples iex> Moeda.find(:XBT) {:error, "'XBT' does not represent an ISO 4217 code"} iex> currencies = %{ XBT: %Moeda{name: "Bitcoin", symbol: '฿', alpha_code: "XBT", num_code: 0, exponent: 8} } iex> Application.put_env(:ex_dinheiro, :unofficial_currencies, currencies) iex> Moeda.find("xbt") {:ok, %Moeda{name: "Bitcoin", symbol: '฿', alpha_code: "XBT", num_code: 0, exponent: 8}} Is possible to override some official ISO currency code adding it in the system Mix config. ## Examples iex> Moeda.find(:BRL) {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} iex> currencies = %{ BRL: %Moeda{name: "Moeda do Brasil", symbol: 'BR$', alpha_code: "BRL", num_code: 986, exponent: 4}, USD: %Moeda{name: "Moeda do EUA", symbol: 'US$', alpha_code: "USD", num_code: 986, exponent: 3} } iex> Application.put_env(:ex_dinheiro, :unofficial_currencies, currencies) iex> Moeda.find(:BRL) {:ok, %Moeda{name: "Moeda do Brasil", symbol: 'BR$', alpha_code: "BRL", num_code: 986, exponent: 4}} iex> Moeda.find(:USD) {:ok, %Moeda{name: "Moeda do EUA", symbol: 'US$', alpha_code: "USD", num_code: 986, exponent: 3}} iex> Application.delete_env(:ex_dinheiro, :unofficial_currencies) iex> Moeda.find(:BRL) {:ok, %Moeda{name: "Brazilian Real", symbol: 'R$', alpha_code: "BRL", num_code: 986, exponent: 2}} Be careful setting new currencies to Mix config. ## Examples iex> Moeda.find(:XBT) {:error, "'XBT' does not represent an ISO 4217 code"} iex> currencies = %{ XBT: %{name: "Bitcoin", symbol: '฿', alpha_code: "XBT", num_code: 0, exponent: 8} } iex> Application.put_env(:ex_dinheiro, :unofficial_currencies, currencies) iex> Moeda.find(:XBT) {:error, "'XBT' does not represent an ISO 4217 code"} iex> currencies = %{ XBT: %Moeda{name: "Bitcoin", symbol: '฿', alpha_code: "XBT", num_code: 0, exponent: 8} } iex> Application.put_env(:ex_dinheiro, :unofficial_currencies, currencies) iex> Moeda.find("xbt") {:ok, %Moeda{name: "Bitcoin", symbol: '฿', alpha_code: "XBT", num_code: 0, exponent: 8}} """ def find(alpha_code) when is_atom(alpha_code) or is_binary(alpha_code) or is_integer(alpha_code) do {:ok, find!(alpha_code)} rescue e -> {:error, e.message} end @spec get_atom!(String.t() | atom) :: atom @doc """ Return an atom from a value that represents an ISO 4217 code. ## Examples iex> Moeda.get_atom!(:BRL) :BRL iex> Moeda.get_atom!("BRL") :BRL iex> Moeda.get_atom!(:NONE) ** (ArgumentError) 'NONE' does not represent an ISO 4217 code Its function ignore case sensitive. ## Examples iex> Moeda.get_atom!(:brl) :BRL iex> Moeda.get_atom!("brl") :BRL """ def get_atom!(alpha_code) do currency = find!(alpha_code) currency.alpha_code |> String.upcase() |> String.to_atom() end @spec get_atom(String.t() | atom) :: {:ok, atom} @doc """ Return an atom from a value that represents an ISO 4217 code. ## Examples iex> Moeda.get_atom(:BRL) {:ok, :BRL} iex> Moeda.get_atom(:NONE) {:error, "'NONE' does not represent an ISO 4217 code"} """ def get_atom(alpha_code) do {:ok, get_atom!(alpha_code)} rescue e -> {:error, e.message} end @spec get_factor!(String.t() | atom) :: float @doc """ Return a multiplication factor from an ISO 4217 code. ## Examples iex> Moeda.get_factor!(:BRL) 100.0 iex> Moeda.get_factor!("BRL") 100.0 iex> Moeda.get_factor!(:NONE) ** (ArgumentError) 'NONE' does not represent an ISO 4217 code Its function ignore case sensitive. ## Examples iex> Moeda.get_factor!(:brl) 100.0 iex> Moeda.get_factor!("brl") 100.0 """ def get_factor!(alpha_code) do currency = find!(alpha_code) :math.pow(10, currency.exponent) end @spec get_factor(String.t() | atom) :: {:ok, float} | {:error, String.t()} @doc """ Return a multiplication factor from an ISO 4217 code. ## Examples iex> Moeda.get_factor(:BRL) {:ok, 100.0} iex> Moeda.get_factor(:NONE) {:error, "'NONE' does not represent an ISO 4217 code"} """ def get_factor(alpha_code) do {:ok, get_factor!(alpha_code)} rescue e -> {:error, e.message} end @spec to_string(String.t() | atom, float, Keywords.t()) :: {:ok, String.t()} | {:error, String.t()} @doc """ Return a formated string from a ISO 4217 code and a float value. ## Examples iex> Moeda.to_string(:BRL, 100.0) {:ok, "R$ 100,00"} iex> Moeda.to_string(:NONE, 1000.5) {:error, "'NONE' does not represent an ISO 4217 code"} """ def to_string(currency, valor, opts \\ []) do {:ok, to_string!(currency, valor, opts)} rescue e -> {:error, e.message} end @spec to_string!(String.t() | atom, float, Keywords.t()) :: String.t() @doc """ Return a formated string from a ISO 4217 code and a float value. ## Examples iex> Moeda.to_string!(:BRL, 100.0) "R$ 100,00" iex> Moeda.to_string!("BRL", 1000.5) "R$ 1.000,50" iex> Moeda.to_string!(:BRL, -1.0) "R$ -1,00" iex> Moeda.to_string!(:NONE, 1000.5) ** (ArgumentError) 'NONE' does not represent an ISO 4217 code Its function ignore case sensitive. ## Examples iex> Moeda.to_string!(:bRl, 100.0) "R$ 100,00" iex> Moeda.to_string!("BrL", 1000.5) "R$ 1.000,50" Using options-style parameters you can change the behavior of the function. - `thousand_separator` - default `"."`, set the thousand separator. - `decimal_separator` - default `","`, set the decimal separator. - `display_currency_symbol` - default `true`, put to `false` to hide de currency symbol. - `display_currency_code` - default `false`, put to `true` to display de currency ISO 4217 code. ## Exemples iex> Moeda.to_string!(:USD, 1000.5, thousand_separator: ",", decimal_separator: ".") "$ 1,000.50" iex> Moeda.to_string!(:USD, 1000.5, display_currency_symbol: false) "1.000,50" iex> Moeda.to_string!(:USD, 1000.5, display_currency_code: true) "$ 1.000,50 USD" iex> Moeda.to_string!(:USD, 1000.5, display_currency_code: true, display_currency_symbol: false) "1.000,50 USD" The default values also can be set in the system Mix config. ## Example: iex> Application.put_env(:ex_dinheiro, :thousand_separator, ",") iex> Application.put_env(:ex_dinheiro, :decimal_separator, ".") iex> Moeda.to_string!(:USD, 1000.5) "$ 1,000.50" iex> Application.put_env(:ex_dinheiro, :display_currency_symbol, false) iex> Moeda.to_string!(:USD, 5000.5) "5,000.50" iex> Application.put_env(:ex_dinheiro, :display_currency_code, true) iex> Moeda.to_string!(:USD, 10000.0) "10,000.00 USD" The options-style parameters override values in the system Mix config. ## Example: iex> Application.put_env(:ex_dinheiro, :thousand_separator, ",") iex> Application.put_env(:ex_dinheiro, :decimal_separator, ".") iex> Moeda.to_string!(:USD, 1000.5) "$ 1,000.50" iex> Moeda.to_string!(:BRL, 1000.5, thousand_separator: ".", decimal_separator: ",") "R$ 1.000,50" """ def to_string!(currency, valor, opts \\ []) do m = find!(currency) unless is_float(valor), do: raise(ArgumentError, message: "Value '#{valor}' must be float") {thousand_separator, decimal_separator, display_currency_symbol, display_currency_code} = get_config(opts) parts = valor |> :erlang.float_to_binary(decimals: m.exponent) |> String.split(".") thousands = parts |> List.first() |> String.reverse() |> String.codepoints() |> format_thousands(thousand_separator) |> String.reverse() decimals = if m.exponent > 0 do Enum.join([decimal_separator, List.last(parts)]) else "" end currency_symbol = if display_currency_symbol do m.symbol else "" end currency_code = if display_currency_code do m.alpha_code else "" end [currency_symbol, " ", thousands, decimals, " ", currency_code] |> Enum.join() |> String.trim() end defp get_config(opts) do conf_thousand_separator = Application.get_env(:ex_dinheiro, :thousand_separator, ".") conf_decimal_separator = Application.get_env(:ex_dinheiro, :decimal_separator, ",") conf_display_currency_symbol = Application.get_env(:ex_dinheiro, :display_currency_symbol, true) conf_display_currency_code = Application.get_env(:ex_dinheiro, :display_currency_code, false) thousand_separator = Keyword.get(opts, :thousand_separator, conf_thousand_separator) decimal_separator = Keyword.get(opts, :decimal_separator, conf_decimal_separator) display_currency_symbol = Keyword.get(opts, :display_currency_symbol, conf_display_currency_symbol) display_currency_code = Keyword.get(opts, :display_currency_code, conf_display_currency_code) {thousand_separator, decimal_separator, display_currency_symbol, display_currency_code} end defp format_thousands([head | tail], separator, opts \\ []) do position = Keyword.get(opts, :position, 1) num = if rem(position, 3) == 0 and head != "-" and tail != [] do Enum.join([head, separator]) else head end if tail != [] do [num, format_thousands(tail, separator, position: position + 1)] |> Enum.join() else num end end end
lib/moeda.ex
0.922133
0.641113
moeda.ex
starcoder
defmodule Cluster.Strategy.GoogleAppEngine do @moduledoc """ Clustering strategy for Google App Engine. This strategy checks for the list of app versions that are currently receiving HTTP. For each version that is listed, the list of instances running for that version are fetched. Once all of the instances have been received, they attempt to connect to each other. **Note**: This strategy only connects nodes that are able to receive HTTP traffic. Here's an example configuration: ```elixir config :libcluster, topologies: [ my_app: [ strategy: Cluster.Strategy.GoogleAppEngine, config: [ polling_interval: 10_000 ] ] ] ``` ## Configurable Options Options can be set for the strategy under the `:config` key when defining the topology. * `:polling_interval` - Interval for checking for the list of running instances. Defaults to `10_000` ## Application Setup ### Google Cloud Enable the **App Engine Admin API** for your application's Google Cloud Project. Follow the guide on [enabling APIs](https://cloud.google.com/apis/docs/enable-disable-apis). ### Release Configuration Update your release's `vm.args` file to include the following lines. ``` ## Name of the node -name <%= release_name%>@${GAE_INSTANCE}.c.${GOOGLE_CLOUD_PROJECT}.internal ## Limit distributed erlang ports to a single port -kernel inet_dist_listen_min 9999 -kernel inet_dist_listen_max 9999 ``` ### GAE Configuration File Update the `app.yaml` configuration file for Google App Engine. ```yaml env_variables: REPLACE_OS_VARS: true network: forwarded_ports: # epmd - 4369 # erlang distribution - 9999 ``` """ use GenServer use Cluster.Strategy alias Cluster.Strategy.State @default_polling_interval 10_000 @access_token_path 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token' def start_link(args) do GenServer.start_link(__MODULE__, args) end @impl true def init([%State{} = state]) do {:ok, load(state)} end @impl true def handle_info(:timeout, state) do handle_info(:load, state) end def handle_info(:load, %State{} = state) do {:noreply, load(state)} end def handle_info(_, state) do {:noreply, state} end defp load(%State{} = state) do connect = state.connect list_nodes = state.list_nodes topology = state.topology nodes = get_nodes(state) Cluster.Strategy.connect_nodes(topology, connect, list_nodes, nodes) Process.send_after(self(), :load, polling_interval(state)) state end defp polling_interval(%State{config: config}) do Keyword.get(config, :polling_interval, @default_polling_interval) end defp get_nodes(%State{}) do project_id = System.get_env("GOOGLE_CLOUD_PROJECT") instances = get_running_instances(project_id) release_name = System.get_env("REL_NAME") Enum.map(instances, & :"#{release_name}@#{&1}.c.#{project_id}.internal") end defp get_running_instances(project_id) do service_id = System.get_env("GAE_SERVICE") versions = get_running_versions(project_id, service_id) Enum.flat_map(versions, &get_instances_for_version(project_id, service_id, &1)) end defp get_running_versions(project_id, service_id) do access_token = access_token() headers = [{'Authorization', 'Bearer #{access_token}'}] api_url = 'https://appengine.googleapis.com/v1/apps/#{project_id}/services/#{service_id}' case :httpc.request(:get, {api_url, headers}, [], []) do {:ok, {{_, 200, _}, _headers, body}} -> %{"split" => %{"allocations" => allocations}} = Jason.decode!(body) Map.keys(allocations) end end defp get_instances_for_version(project_id, service_id, version) do access_token = access_token() headers = [{'Authorization', 'Bearer #{access_token}'}] api_url = 'https://appengine.googleapis.com/v1/apps/#{project_id}/services/#{service_id}/versions/#{version}/instances' case :httpc.request(:get, {api_url, headers}, [], []) do {:ok, {{_, 200, _}, _headers, body}} -> handle_instances(Jason.decode!(body)) end end defp handle_instances(%{"instances" => instances}) do instances |> Enum.filter(& &1["vmStatus"] == "RUNNING") |> Enum.map(& &1["id"]) end defp handle_instances(_), do: [] defp access_token do headers = [{'Metadata-Flavor', 'Google'}] case :httpc.request(:get, {@access_token_path, headers}, [], []) do {:ok, {{_, 200, _}, _headers, body}} -> %{"access_token" => token} = Jason.decode!(body) token end end end
lib/google_app_engine.ex
0.851181
0.79158
google_app_engine.ex
starcoder
defmodule Calamity do @moduledoc """ Documentation for `Calamity`. """ alias Calamity.Command alias Calamity.Stack alias Calamity.AggregateStore alias Calamity.ProcessManagerStore alias Calamity.VersionStore require Logger @doc """ Executes a command, and return the updated aggregates, process managers, and event store. Calamity is protocol-driven, which means that all arguments to this function only need to implement the correct protocols. - `command` must implement `Calamity.Command` - `aggregates` must implement `Calamity.AggregateStore`, and contain structs implementing `Calamity.Aggregate` - `process_manager_modules` must implement `Enumerable` and contain modules - `process_managers` must implement `Calamity.ProcessManagerStore`. - `event_store` must implement `Calamity.EventStore` """ def dispatch(stack, command) do Logger.debug("Processing command #{inspect(command, pretty: true)}") {stack, events} = execute(stack, command) Logger.debug("Aggregate emitted events #{inspect(events, pretty: true)}") {new_commands, new_process_managers} = Enum.reduce(events, {[], stack.process_manager_store}, fn event, {commands, store} -> {new_commands, store} = ProcessManagerStore.handle_event(store, event, stack.process_manager_mods) {new_commands ++ commands, store} end) Logger.debug("Process managers emitted commands #{inspect(new_commands, pretty: true)}") stack = %Stack{stack | process_manager_store: new_process_managers } Enum.reduce(new_commands, stack, fn new_command, stack -> dispatch(stack, new_command) end) end defp execute(stack, command) do {events, aggregate_store} = Calamity.AggregateStore.dispatch(stack.aggregate_store, command) {agg_mod, agg_id} = Command.aggregate(command) agg_version = Access.get(stack.aggregate_versions, agg_id, 0) expected_version = if agg_version == 0, do: :no_stream, else: agg_version Calamity.EventStore.append(stack.event_store, agg_id, events, expected_version: expected_version) |> case do {:ok, event_store} -> stack = %Stack{stack | event_store: event_store, aggregate_store: aggregate_store, aggregate_versions: VersionStore.increment_version(stack.aggregate_versions, agg_id, Enum.count(events)) } {stack, events} {:error, :stream_exists} -> stack |> sync_aggregate(agg_mod, agg_id) |> execute(command) end end defp sync_aggregate(stack, agg_mod, agg_id) do agg_version = Access.get(stack.aggregate_versions, agg_id, 0) missed_events = Calamity.EventStore.stream(stack.event_store, agg_id, start_version: agg_version) |> Enum.map(&elem(&1, 0)) if Enum.count(missed_events) > 0 do Logger.debug("Catching up aggregate #{inspect agg_id} with #{Enum.count(missed_events)} new events") end apply_events(stack, agg_mod, agg_id, missed_events) end defp apply_events(stack, agg_mod, agg_id, events) do agg_store = AggregateStore.apply(stack.aggregate_store, agg_mod, agg_id, events) new_version_store = VersionStore.increment_version(stack.aggregate_versions, agg_id, Enum.count(events)) %Stack{stack | aggregate_store: agg_store, aggregate_versions: new_version_store} end def aggregate do quote do use Calamity.Aggregate.Base end end def process_manager do quote do use Calamity.ProcessManager.Base end end defmacro __using__(which) when is_atom(which) do apply(__MODULE__, which, []) end end
lib/calamity.ex
0.780035
0.524273
calamity.ex
starcoder
defmodule Rihanna.Migration.Upgrade do @moduledoc """ A set of tools for upgrading an existing Rihanna jobs table. Rihanna stores jobs in a table in your database. The default table name is "rihanna_jobs". The name is configurable by either passing it as an argument to the functions below or setting `:jobs_table_name` in Rihanna's config. #### Using Ecto The easiest way to upgrade the database is with Ecto. Run `mix ecto.gen.migration upgrade_rihanna_jobs` and make your migration look like this: ```elixir defmodule MyApp.UpgradeRihannaJobs do use Rihanna.Migration.Upgrade end ``` Now you can run `mix ecto.migrate`. #### Without Ecto Ecto is not required to run Rihanna. If you want to upgrade the table yourself, without Ecto, take a look at either `statements/0` or `sql/0`. """ alias Rihanna.Migration.Upgrade defmacro __using__(opts) do table_name = Keyword.get(opts, :table_name, Rihanna.Config.jobs_table_name()) |> to_string quote do use Ecto.Migration def up do Enum.each(Upgrade.statements(unquote(table_name)), &execute/1) end def down do Enum.each(Upgrade.drop_statements(unquote(table_name)), &execute/1) end end end @doc """ Returns a list of SQL statements that will rollback the upgrade of Rihanna jobs table if executed sequentially. By default it takes the name of the table from the application config. You may optionally supply a table name as an argument if you want to override this. ## Examples > Rihanna.Migration.Upgrade.drop_statements [...] > Rihanna.Migration.Upgrade.drop_statements("my_alternative_table_name") [...] """ @spec drop_statements() :: list[String.t()] @spec drop_statements(String.t() | atom) :: list[String.t()] def drop_statements(table_name \\ Rihanna.Config.jobs_table_name()) do [ """ ALTER TABLE #{table_name} DROP COLUMN due_at; """, """ ALTER TABLE #{table_name} DROP COLUMN rihanna_internal_meta; """, """ ALTER TABLE #{table_name} DROP COLUMN priority; """, """ DO $$ BEGIN DROP INDEX IF EXISTS rihanna_jobs_priority_enqueued_at_id; DROP INDEX IF EXISTS #{table_name}_locking_index; CREATE INDEX IF NOT EXISTS #{table_name}_locking_index ON #{table_name} (priority ASC, due_at ASC, enqueued_at ASC, id ASC); END; $$ """ ] end @doc """ Returns a list of SQL statements that will upgrade the Rihanna jobs table if executed sequentially. By default it takes the name of the table from the application config. You may optionally supply a table name as an argument if you want to override this. ## Examples > Rihanna.Migration.Upgrade.statements [...] > Rihanna.Migration.Upgrade.statements("my_alternative_table_name") [...] """ @spec statements() :: list[String.t()] @spec statements(String.t() | atom) :: list[String.t()] def statements(table_name \\ Rihanna.Config.jobs_table_name()) when is_binary(table_name) or is_atom(table_name) do [ # Postgres versions earlier than v9.6 do not suppport `IF EXISTS` predicates # on alter table commands. For backwards compatibility we're using a try/catch # approach to add the `due_at` column idempotently. """ DO $$ BEGIN BEGIN ALTER TABLE #{table_name} ADD COLUMN due_at timestamp with time zone; ALTER TABLE #{table_name} ADD COLUMN rihanna_internal_meta jsonb NOT NULL DEFAULT '{}'; EXCEPTION WHEN duplicate_column THEN RAISE NOTICE 'column already exists in #{table_name}.'; END; END; $$ """, """ DO $$ BEGIN ALTER TABLE #{table_name} ADD COLUMN priority integer NOT NULL DEFAULT 50; EXCEPTION WHEN duplicate_column THEN RAISE NOTICE 'column already exists in #{table_name}.'; END; $$ """, """ DO $$ BEGIN DROP INDEX IF EXISTS #{table_name}_enqueued_at_id; DROP INDEX IF EXISTS #{table_name}_priority_enqueued_at_id; DROP INDEX IF EXISTS #{table_name}_locking_index; CREATE INDEX IF NOT EXISTS #{table_name}_locking_index ON #{table_name} (priority ASC, due_at ASC, enqueued_at ASC, id ASC); END; $$ """ ] end @doc """ Returns a string of semi-colon-terminated SQL statements that you can execute directly to upgrade the Rihanna jobs table. """ @spec sql(String.t() | atom) :: String.t() def sql(table_name \\ Rihanna.Config.jobs_table_name()) do Enum.join(statements(table_name), "\n") end end
lib/rihanna/migration/upgrade.ex
0.889132
0.668688
upgrade.ex
starcoder
defmodule PeopleSorter.Person do @moduledoc """ A Person is a struct that contains fields that describe a Person. """ alias PeopleSorter.Person @derive {Phoenix.Param, key: :email} @type t :: %__MODULE__{ last_name: String.t(), first_name: String.t(), email: String.t(), favorite_color: String.t(), date_of_birth: Date.t() } @derive {Jason.Encoder, only: [:last_name, :first_name, :email, :favorite_color, :date_of_birth]} defstruct [:last_name, :first_name, :email, :favorite_color, :date_of_birth] @spec new(String.t(), String.t(), String.t(), String.t(), Date.t()) :: t() def new(last_name, first_name, email, favorite_color, date_of_birth) do %__MODULE__{ last_name: last_name, first_name: first_name, email: email, favorite_color: favorite_color, date_of_birth: date_of_birth } end @doc """ Take a list of person attributes and build a Person Struct """ @spec new([String.t()]) :: t() def new(person_list) do with 5 <- Enum.count(person_list) do {last_name, _} = List.pop_at(person_list, 0) {first_name, _} = List.pop_at(person_list, 1) {email, _} = List.pop_at(person_list, 2) {color, _} = List.pop_at(person_list, 3) {dob, _} = List.pop_at(person_list, 4) case convert_date_to_dob(dob) do {:ok, date_of_birth} -> PeopleSorter.Person.new(last_name, first_name, email, color, date_of_birth) {:error, :invalid_date} -> IO.puts("invalid dob(#{dob}) for #{email}, skipped") nil end else _ -> nil end end @doc """ convert input format of Month/Day/Year to Elixir Date """ @spec convert_date_to_dob(String.t()) :: {:ok, Date.t()} | {:error, :invalid_date} def convert_date_to_dob(string_date) do with parse_results <- convert_string_parts_to_int_parts(string_date), false <- conversion_contains_errors?(parse_results) do parse_results |> remove_remainder_from_parse() |> create_date_from_date_parts() else _ -> {:error, :invalid_date} end end defp convert_string_parts_to_int_parts(date) do date |> String.split("/") |> Enum.map(&Integer.parse/1) end defp conversion_contains_errors?(int_parts) do Enum.any?(int_parts, fn item -> item == :error end) end defp remove_remainder_from_parse(int_parts) do Enum.map(int_parts, fn date_piece -> elem(date_piece, 0) end) end defp create_date_from_date_parts(date_parts) do {month, _} = List.pop_at(date_parts, 0) {day, _} = List.pop_at(date_parts, 1) {year, _} = List.pop_at(date_parts, 2) Date.new(year, month, day) end @doc """ Depending on the delimiter, split the line """ @spec parse_person_line(String.t()) :: [String.t()] def parse_person_line(line) do cond do String.contains?(line, "|") -> String.split(line, "|") String.contains?(line, ",") -> String.split(line, ",") true -> String.split(line, " ") end end @doc """ Format date for display """ def format_date(date) do "#{date.month}/#{date.month}/#{date.year}" end defimpl String.Chars do @doc """ Fornat Person for display, add plenty of padding """ def to_string(person) do last_name = String.pad_trailing(person.last_name, 30) first_name = String.pad_trailing(person.first_name, 20) email = String.pad_trailing(person.email, 40) favorite_color = String.pad_trailing(person.favorite_color, 20) date_of_birth = person.date_of_birth |> Person.format_date() |> String.pad_leading(11) "#{last_name} #{first_name} #{email} #{favorite_color} #{date_of_birth}" end end end
lib/people_sorter/person.ex
0.672762
0.451145
person.ex
starcoder
defmodule Blockchain.Transaction do @moduledoc """ Represents one transaction within the chain. There is a special "reward" transaction which has a fixed amount in it, and is sent from a special address. Those are set in the @reward and @rewarder constants. The transactions are signed by the sender using the Ed25519 algorithm. This way, transactions can't be modified third parties. """ @reward 1.0 @rewarder <<0::size(256)>> @typedoc """ A transaction with its sender and recipient address, the amount, the transaction creation timestamp (it doesn't have to be valid, it used to avoid replaying transactions) and the transaction signature. """ @type t :: %__MODULE__{ timestamp: integer, sender: Ed25519.key(), recipient: Ed25519.key(), amount: number, signature: Ed25519.signature() } defstruct timestamp: 0, sender: <<0::size(256)>>, recipient: <<0::size(256)>>, amount: 0.0, signature: <<0::size(512)>> @doc """ Create a new transaction and sign it """ @spec new( recipient :: Ed25519.key() | String.t(), amount :: float | integer | String.t(), priv :: Ed25519.key() | String.t() ) :: t() def new(recipient, amount, priv) when amount >= 0 and is_float(amount) and byte_size(priv) == 32 and byte_size(recipient) == 32 do sign( %__MODULE__{ timestamp: System.system_time(:nanoseconds), sender: Ed25519.derive_public_key(priv), recipient: recipient, amount: amount }, priv ) end def new(recipient, amount, priv) when is_integer(amount) do new(recipient, amount / 1, priv) end def new(recipient, amount, priv) when is_bitstring(amount) do # Parse amount formatted as float or integers amount = cond do String.contains?(amount, ["e", "E"]) -> String.to_float(amount) # Add leading and closing zeros to parse things like ".5" or "1." String.contains?(amount, ".") -> String.to_float("0#{amount}0") true -> String.to_integer(amount) end new(recipient, amount, priv) end def new(recipient, amount, priv) when byte_size(priv) == 32 do new(Base.url_decode64!(recipient), amount, priv) end def new(recipient, amount, priv) do new(recipient, amount, Base.url_decode64!(priv)) end @doc """ The binary payload of the transaction that will be signed """ @spec payload(transaction :: t()) :: binary def payload(%__MODULE__{ timestamp: timestamp, sender: sender, recipient: recipient, amount: amount }) do <<timestamp::unsigned-little-integer-size(64)>> <> sender <> recipient <> <<amount::float>> end @doc """ Sign a transaction with the given private key """ @spec sign(transaction :: t(), key :: Ed25519.key()) :: t() def sign(transaction, key) do signature = Ed25519.signature(payload(transaction), key, transaction.sender) %__MODULE__{transaction | signature: signature} end @doc """ Check if a transaction is valid (either the transaction is signed or it is a reward transaction) """ @spec valid?(transaction :: t()) :: boolean def valid?(%__MODULE__{sender: sender, amount: amount, signature: signature} = tx) do is_reward?(tx) or (Ed25519.valid_signature?(signature, payload(tx), sender) and amount >= 0) end @doc """ Create a reward transaction for the given recipient """ @spec reward(recipient :: Ed25519.key()) :: t() def reward(recipient) do %__MODULE__{ timestamp: System.system_time(:nanoseconds), sender: @rewarder, recipient: recipient, amount: @reward } end @doc """ Check if a given transaction is a reward """ @spec is_reward?(tx :: t()) :: boolean() def is_reward?(tx) do tx.sender == @rewarder and tx.amount == @reward end defp prune_accounts(accounts) do Map.drop(accounts, for({acc, amount} <- accounts, amount == 0.0, do: acc)) end @doc """ Run a list of transactions on an account and transactions cache """ @spec run( accounts :: %{binary() => float()}, hashes :: MapSet.t(binary()), transactions :: [t()] ) :: {:ok, %{binary() => float()}, MapSet.t(binary())} | {:error, t()} def run(accounts, hashes, []), do: {:ok, prune_accounts(accounts), hashes} def run(accounts, hashes, [tx | transactions]) do hash = hash(tx) cond do MapSet.member?(hashes, hash) -> {:error, tx} not valid?(tx) -> {:error, tx} is_reward?(tx) -> accounts = Map.update(accounts, tx.recipient, tx.amount, &(&1 + tx.amount)) hashes = MapSet.put(hashes, hash) run(accounts, hashes, transactions) tx.amount <= Map.get(accounts, tx.sender, 0.0) -> accounts = Map.update(accounts, tx.recipient, tx.amount, &(&1 + tx.amount)) accounts = Map.update(accounts, tx.sender, 0.0, &(&1 - tx.amount)) hashes = MapSet.put(hashes, hash) run(accounts, hashes, transactions) true -> {:error, tx} end end @doc """ Rollback a list of transactions on an account and transactions cache """ @spec rollback( accounts :: %{binary() => float()}, hashes :: MapSet.t(binary()), transactions :: [t()] ) :: {:ok, %{binary() => float()}, MapSet.t(binary())} | {:error, t()} def rollback(accounts, hashes, []), do: {:ok, prune_accounts(accounts), hashes} def rollback(accounts, hashes, [tx | transactions]) do hash = hash(tx) cond do not valid?(tx) -> {:error, tx} MapSet.member?(hashes, hash) and tx.amount <= Map.get(accounts, tx.recipient, 0.0) -> accounts = Map.update(accounts, tx.recipient, tx.amount, &(&1 - tx.amount)) accounts = if is_reward?(tx), do: accounts, else: Map.update(accounts, tx.sender, 0.0, &(&1 + tx.amount)) hashes = MapSet.delete(hashes, hash) rollback(accounts, hashes, transactions) true -> {:error, tx} end end @doc """ Compute a unique hash for this transaction/transactions list """ @spec hash(tx :: t() | [t()]) :: binary() def hash(tx) do :crypto.hash_init(:sha256) |> hash(tx) |> :crypto.hash_final() end @spec hash(sha :: term(), tx :: t() | [t()]) :: term() def hash(sha, transactions) when is_list(transactions) do Enum.reduce(transactions, sha, fn t, sha -> hash(sha, t) end) end def hash(sha, transaction) do sha |> :crypto.hash_update(payload(transaction)) |> :crypto.hash_update(transaction.signature) end end defimpl String.Chars, for: Blockchain.Transaction do def to_string(%{sender: snd, recipient: rcp, amount: amt, signature: sig, timestamp: ts} = tx) do [snd, rcp, sig] = Enum.map([snd, rcp, sig], &Base.url_encode64(&1, padding: false)) date = DateTime.from_unix!(ts, :nanoseconds) if Blockchain.Transaction.is_reward?(tx) do "#{date}: REWARD -(#{amt})-> #{rcp}" else "#{date}: #{snd} -(#{amt})-> #{rcp} (sig: #{sig})" end end end
apps/blockchain/lib/blockchain/transaction.ex
0.910473
0.601242
transaction.ex
starcoder
defmodule BMP280.Calc do alias BMP280.{Calibration, Measurement} @moduledoc false @doc """ Convert raw sensor reports to temperature, pressure and altitude measurements """ @spec raw_to_measurement(Calibration.t(), number(), map()) :: Measurement.t() def raw_to_measurement(%Calibration{} = cal, sea_level_pa, raw) do temp = raw_to_temperature(cal, raw.raw_temperature) pressure = raw_to_pressure(cal, temp, raw.raw_pressure) altitude = pressure_to_altitude(pressure, sea_level_pa) humidity = raw_to_humidity(cal, temp, Map.get(raw, :raw_humidity)) %Measurement{ temperature_c: temp, pressure_pa: pressure, altitude_m: altitude, humidity_rh: humidity } end defp raw_to_temperature(cal, raw_temp) do var1 = (raw_temp / 16384 - cal.dig_T1 / 1024) * cal.dig_T2 var2 = (raw_temp / 131_072 - cal.dig_T1 / 8192) * (raw_temp / 131_072 - cal.dig_T1 / 8192) * cal.dig_T3 (var1 + var2) / 5120 end defp raw_to_pressure(cal, temp, raw_pressure) do t_fine = temp * 5120 var1 = t_fine / 2 - 64000 var2 = var1 * var1 * cal.dig_P6 / 32768 var2 = var2 + var1 * cal.dig_P5 * 2 var2 = var2 / 4 + cal.dig_P4 * 65536 var1 = (cal.dig_P3 * var1 * var1 / 524_288 + cal.dig_P2 * var1) / 524_288 var1 = (1 + var1 / 32768) * cal.dig_P1 p = 1_048_576 - raw_pressure p = (p - var2 / 4096) * 6250 / var1 var1 = cal.dig_P9 * p * p / 2_147_483_648 var2 = p * cal.dig_P8 / 32768 p = p + (var1 + var2 + cal.dig_P7) / 16 p end defp raw_to_humidity(%{has_humidity?: true} = cal, temp, raw_humidity) when is_integer(raw_humidity) do t_fine = temp * 5120 var_H = t_fine - 76800 var_H = (raw_humidity - (cal.dig_H4 * 64 + cal.dig_H5 / 16384 * var_H)) * (cal.dig_H2 / 65536 * (1 + cal.dig_H6 / 67_108_864 * var_H * (1 + cal.dig_H3 / 67_108_864 * var_H))) var_H = var_H * (1 - cal.dig_H1 * var_H / 524_288) min(100, max(0, var_H)) end defp raw_to_humidity(_cal, _temp, _raw), do: 0 @doc """ Calculate the altitude using the current pressure and sea level pressure """ @spec pressure_to_altitude(number(), number()) :: float() def pressure_to_altitude(p, sea_level_pa) do 44330 * (1 - :math.pow(p / sea_level_pa, 1 / 5.255)) end @doc """ Calculate the sea level pressure based on the specified altitude """ @spec sea_level_pressure(number(), number()) :: float() def sea_level_pressure(p, altitude) do p / :math.pow(1 - altitude / 44330, 5.255) end end
lib/bmp280/calc.ex
0.761272
0.539347
calc.ex
starcoder
defmodule Gradient.ElixirChecker do @moduledoc ~s""" Provide checks specific to Elixir that complement type checking delivered by Gradient. Options: - {`ex_check`, boolean()}: whether to use checks specific only to Elixir. """ @spec check([:erl_parse.abstract_form()], keyword()) :: [{:file.filename(), any()}] def check(forms, opts) do if Keyword.get(opts, :ex_check, true) do check_spec(forms) else [] end end @doc ~s""" Check if all specs are exactly before the function that they specify and if there is only one spec per function clause. Correct spec locations: ``` @spec convert(integer()) :: float() def convert(int) when is_integer(int), do: int / 1 @spec convert(atom()) :: binary() def convert(atom) when is_atom(atom), do: to_string(atom) ``` Incorrect spec locations: - More than one spec above function clause. ``` @spec convert(integer()) :: float() @spec convert(atom()) :: binary() def convert(int) when is_integer(int), do: int / 1 def convert(atom) when is_atom(atom), do: to_string(atom) ``` - Spec name doesn't match the function name. ``` @spec last_two(atom()) :: atom() def last_three(:ok) do :ok end ``` """ @spec check_spec([:erl_parse.abstract_form()]) :: [{:file.filename(), any()}] def check_spec([{:attribute, _, :file, {file, _}} | forms]) do forms |> Stream.filter(&is_fun_or_spec?/1) |> Stream.map(&simplify_form/1) |> Stream.concat() |> Stream.filter(&is_not_generated?/1) |> Enum.sort(&(elem(&1, 2) < elem(&2, 2))) |> Enum.reduce({nil, []}, fn {:fun, {n, :def}, _}, {{:spec, {sn, _}, _}, _} = acc when n == sn -> # skip clauses generated for default arguments acc {:fun, fna, _} = fun, {{:spec, {n, a} = sna, anno}, errors} when fna != sna -> # Spec name doesn't match the function name {fun, [{:spec_error, :wrong_spec_name, anno, n, a} | errors]} {:spec, {n, a}, anno} = s1, {{:spec, {n2, a2}, _}, errors} when n != n2 or a != a2 -> # Specs with diffrent name/arity are mixed {s1, [{:spec_error, :mixed_specs, anno, n, a} | errors]} x, {_, errors} -> {x, errors} end) |> elem(1) |> Enum.map(&{file, &1}) |> Enum.reverse() end # Filter out __info__ and other generated functions with the same name pattern def is_not_generated?({_, {name, _}, _}) do name_str = Atom.to_string(name) not (String.starts_with?(name_str, "__") and String.ends_with?(name_str, "__")) end def is_fun_or_spec?({:attribute, _, :spec, _}), do: true def is_fun_or_spec?({:function, _, _, _, _}), do: true def is_fun_or_spec?(_), do: false @spec simplify_form(:erl_parse.abstract_form()) :: Enumerable.t({:spec | :fun, {atom(), integer()}, :erl_anno.anno()}) def simplify_form({:attribute, _, :spec, {{name, arity}, types}}) do Stream.map(types, &{:spec, {name, arity}, elem(&1, 1)}) end def simplify_form({:function, anno, name, arity, clauses}) do Stream.map(clauses, &default_args_clause(anno, name, arity, &1)) end def default_args_clause(anno, name, arity, clause) do with {:clause, ^anno, vars, [], [{:call, ^anno, {:atom, ^anno, ^name}, _}]} <- clause, true <- all_vars_generated?(vars) do {:fun, {name, :def}, anno} else _ -> {:fun, {name, arity}, elem(clause, 1)} end end def all_vars_generated?(vars) do Enum.all?(vars, fn {:var, anno, _} -> :erl_anno.generated(anno) end) end end
lib/gradient/elixir_checker.ex
0.824815
0.870487
elixir_checker.ex
starcoder
defmodule HLDSRcon do @moduledoc """ A library for creating Half-Life Dedicated Server (a.k.a "HLDS") remote connections (a.k.a "rcon") and executing commands. Uses a `DynamicSupervisor` for connecting clients. If you want to manage the rcon client supervision yourself you can use the `HLDSRcon.RconClient` module directly. ## Examples If you are running a server on the localhost with the HLDS `rcon_password` set to `<PASSWORD>`, you would connect; ``` {:ok, _pid} = HLDSRcon.connect("127.0.0.1", "Foo") ``` Now that the connection is established, you can run commands; ``` {:ok, _response} = HLDSRcon.command("127.0.0.1", "echo Test") ``` Some common command responses are processed into structs for ease of use; ``` {:ok, %HLDSRcon.Stats{} = stats} = HLDSRcon.command("127.0.0.1", "stats") ``` These common commands also have entry points in this module, e.g. instead of calling command to run stats as above, we could simply call; ``` { :ok, %HLDSRcon.Stats{ cpu: 11.33, fps: 921.12, in: 0.0, out: 0.0, players: 0, uptime: 895, users: 0 } } = HLDSRcon.stats("127.0.0.1") ``` """ @type host() :: String.t() alias HLDSRcon.ServerInfo alias HLDSRcon.RconClient @doc """ Connect to a HLDS server, using the `HLDSRcon.ServerInfo` struct to specify server information """ @spec connect(%ServerInfo{}) :: {:ok, pid()} | {:error, atom()} def connect(%ServerInfo{} = server_info) do DynamicSupervisor.start_child(HLDSRcon.ClientSupervisor, {RconClient, server_info}) end @doc """ Connect to a HLDS server at host with password, default port will be used """ @spec connect(host(), String.t()) :: {:ok, pid()} | {:error, atom()} def connect(host, password) when is_binary(password) do connect(host, ServerInfo.default_port, password) end @doc """ Connect to a HLDS server at host:port with password """ @spec connect(host(), integer(), String.t()) :: {:ok, pid()} | {:error, atom()} def connect(host, port, password) when is_integer(port) do connect(%ServerInfo{ host: host, port: port, password: password }) end @doc """ Get result of running rcon `stats` command on a connected server, using the default port """ @spec stats(host()) :: {:ok, HLDSRcon.ServerInfo.t()} | {:error, atom()} def stats(host) do stats(host, ServerInfo.default_port) end @doc """ Get the result of running rcon `stats` on a connected server Returning: `{:ok, %HLDSRcon.Stats{}}` when successful Returning: `{:error, reason}` when unsuccessful """ @spec stats(host(), integer()) :: {:ok, HLDSRcon.ServerInfo.t()} | {:error, atom()} def stats(host, port) do case :global.whereis_name(host <> ":" <> Integer.to_string(port)) do :undefined -> {:error, :not_connected} _pid -> RconClient.stats(host, port) end end @doc """ Run an arbitrary rcon command on a connected server, using the default port """ @spec command(host(), String.t()) :: {:ok, String.t()} | {:error, atom()} def command(host, command) when is_binary(command) do command(host, ServerInfo.default_port, command) end @doc """ Run an arbitrary rcon command on a connected server Returning: `{:ok, raw_response}` when successful Returning: `{:error, reason}` when unsuccessful """ @spec command(host(), integer(), String.t()) :: {:ok, String.t()} | {:error, atom()} def command(host, port, command) when is_integer(port) do case :global.whereis_name(host <> ":" <> Integer.to_string(port)) do :undefined -> {:error, :not_connected} _pid -> RconClient.command(host, port, command) end end @doc """ Cleaning disconnect a connected client at `host` with default port """ @spec disconnect(host()) :: {:ok, atom()} def disconnect(host) do disconnect(host, ServerInfo.default_port) end @doc """ Cleaning disconnect a connected client at `host`:`port` """ @spec disconnect(host(), integer()) :: {:ok, atom()} def disconnect(host, port) when is_integer(port) do case :global.whereis_name(host <> ":" <> Integer.to_string(port)) do :undefined -> {:error, :not_connected} pid -> RconClient.disconnect(host, port) case DynamicSupervisor.stop(HLDSRcon.ClientSupervisor, pid) do :ok -> {:ok, :normal} error_tuple -> error_tuple end end end end
lib/hlds_rcon.ex
0.883412
0.755952
hlds_rcon.ex
starcoder
defmodule Bitcoinex.Secp256k1 do @moduledoc """ ECDSA Secp256k1 curve operations. libsecp256k1: https://github.com/bitcoin-core/secp256k1 Currently supports ECDSA public key recovery. In the future, we will NIF for critical operations. However, it is more portable to have a native elixir version. """ use Bitwise, only_operators: true alias Bitcoinex.Secp256k1.{Math, Params, Point} @generator_point %Point{ x: Params.curve().g_x, y: Params.curve().g_y } defmodule Signature do @moduledoc """ Contains r,s in signature. """ @type t :: %__MODULE__{ r: pos_integer(), s: pos_integer() } @enforce_keys [ :r, :s ] defstruct [:r, :s] @spec parse_signature(binary) :: {:ok, t()} | {:error, String.t()} @doc """ accepts a compact signature and returns a Signature containing r,s """ def parse_signature(<<r::binary-size(32), s::binary-size(32)>>) do # Get r,s from signature. r = :binary.decode_unsigned(r) s = :binary.decode_unsigned(s) # Verify that r,s are integers in [1, n-1] where n is the integer order of G. cond do r < 1 -> {:error, "invalid signature"} r > Params.curve().n - 1 -> {:error, "invalid signature"} s < 1 -> {:error, "invalid signature"} s > Params.curve().n - 1 -> {:error, "invalid signature"} true -> {:ok, %Signature{r: r, s: s}} end end def parse_signature(compact_sig) when is_binary(compact_sig), do: {:error, "invalid signature size"} @doc """ der_parse_signature parses a DER binary to a Signature """ # @spec der_parse_signature(binary) :: {:ok, Signature.()} | {:error, String.t()} def der_parse_signature(<<0x30>> <> der_sig) when is_binary(der_sig) do sig_len = :binary.at(der_sig, 0) if sig_len + 1 != byte_size(der_sig) do {:error, "invalid signature length"} else case parse_sig_key(der_sig, 1) do {:error, err} -> {:error, err} {r, s_pos} -> case parse_sig_key(der_sig, s_pos) do {:error, err} -> {:error, err} {s, sig_len} -> if sig_len != byte_size(der_sig) do {:error, "invalid signature: signature is too long"} else {:ok, %Signature{r: r, s: s}} end end end end end def der_parse_signature(_), do: {:error, "invalid signature"} defp parse_sig_key(data, pos) do if :binary.at(data, pos) != 0x02 do {:error, "invalid signature key marker"} else k_len = :binary.at(data, pos + 1) len_k = :binary.part(data, pos + 2, k_len) {:binary.decode_unsigned(len_k), pos + 2 + k_len} end end @doc """ der_serialize_signature returns the DER serialization of an ecdsa signature """ @spec der_serialize_signature(Signature.t()) :: binary def der_serialize_signature(%Signature{r: r, s: s}) do r_bytes = serialize_sig_key(r) s_bytes = serialize_sig_key(s) <<0x30>> <> len_as_bytes(r_bytes <> s_bytes) <> r_bytes <> s_bytes end def der_serialize_signature(_), do: {:error, "Signature object required"} defp serialize_sig_key(k) do k |> :binary.encode_unsigned() |> lstrip(<<0x00>>) |> add_high_bit() |> prefix_key() end defp len_as_bytes(data), do: :binary.encode_unsigned(byte_size(data)) defp lstrip(<<head::binary-size(1)>> <> tail, val) do if head == val, do: lstrip(tail, val), else: head <> tail end defp add_high_bit(k_bytes) do unless (:binary.at(k_bytes, 0) &&& 0x80) == 0 do <<0x00>> <> k_bytes else k_bytes end end defp prefix_key(k_bytes), do: <<0x02>> <> len_as_bytes(k_bytes) <> k_bytes end @doc """ ecdsa_recover_compact does ECDSA public key recovery. """ @spec ecdsa_recover_compact(binary, binary, integer) :: {:ok, binary} | {:error, String.t()} def ecdsa_recover_compact(msg, compact_sig, recoveryId) do # Parse r and s from the signature. case Signature.parse_signature(compact_sig) do {:ok, sig} -> # Find the iteration. # R(x) = (n * i) + r # where n is the order of the curve and R is from the signature. r_x = Params.curve().n * Integer.floor_div(recoveryId, 2) + sig.r # Check that R(x) is on the curve. if r_x > Params.curve().p do {:error, "R(x) is not on the curve"} else # Decompress to get R(y). case get_y(r_x, rem(recoveryId, 2) == 1) do {:ok, r_y} -> # R(x,y) point_r = %Point{x: r_x, y: r_y} # Point Q is the recovered public key. # We satisfy this equation: Q = r^-1(sR-eG) inv_r = Math.inv(sig.r, Params.curve().n) inv_r_s = (inv_r * sig.s) |> Math.modulo(Params.curve().n) # R*s point_sr = Math.multiply(point_r, inv_r_s) # Find e using the message hash. e = :binary.decode_unsigned(msg) |> Kernel.*(-1) |> Math.modulo(Params.curve().n) |> Kernel.*(inv_r |> Math.modulo(Params.curve().n)) # G*e point_ge = Math.multiply(@generator_point, e) # R*e * G*e point_q = Math.add(point_sr, point_ge) # Returns serialized compressed public key. {:ok, Point.serialize_public_key(point_q)} {:error, error} -> {:error, error} end end {:error, e} -> {:error, e} end end @doc """ Returns the y-coordinate of a secp256k1 curve point (P) using the x-coordinate. To get P(y), we solve for y in this equation: y^2 = x^3 + 7. """ @spec get_y(integer, boolean) :: {:ok, integer} | {:error, String.t()} def get_y(x, is_y_odd) do # x^3 + 7 y_sq = :crypto.mod_pow(x, 3, Params.curve().p) |> :binary.decode_unsigned() |> Kernel.+(7 |> Math.modulo(Params.curve().p)) # Solve for y. y = :crypto.mod_pow(y_sq, Integer.floor_div(Params.curve().p + 1, 4), Params.curve().p) |> :binary.decode_unsigned() y = case rem(y, 2) == 1 do ^is_y_odd -> y _ -> Params.curve().p - y end # Check. if y_sq != :crypto.mod_pow(y, 2, Params.curve().p) |> :binary.decode_unsigned() do {:error, "invalid sq root"} else {:ok, y} end end @doc """ verify_point verifies that a given point is on the secp256k1 curve """ @spec verify_point(Point.t()) :: bool def verify_point(%Point{x: x, y: y}) do y_odd = rem(y, 2) == 1 {:ok, new_y} = get_y(x, y_odd) y == new_y end @doc """ verify whether the signature is valid for the given message hash and public key """ @spec verify_signature(Point.t(), integer, Signature.t()) :: boolean def verify_signature(pubkey, sighash, %Signature{r: r, s: s}) do n = Params.curve().n s_inv = Math.inv(s, n) u = Math.modulo(sighash * s_inv, n) v = Math.modulo(r * s_inv, n) total = Math.add(Math.multiply(@generator_point, u), Math.multiply(pubkey, v)) total.x == r end end
lib/secp256k1/secp256k1.ex
0.900898
0.473901
secp256k1.ex
starcoder
defmodule Ed25519 do use Bitwise @moduledoc """ Ed25519 signature functions This is mostly suitable as part of a pure Elixir solution. ## Configuration *No configuration is needed* in most cases. However, if needed, a custom hash function can be configured. As per the specification - `sha512` is the default. `config/config.exs` import Config # The hash function will be invoked as 'Blake2.hash2b(payload, 16)' config :ed25519, hash_fn: {Blake2, :hash2b, [], [16]} # The hash function will be invoked as ':crypto.hash(:sha256, payload)' config :ed25519, hash_fn: {:crypto, :hash, [:sha256], []} """ @typedoc """ public or secret key """ @type key :: binary @typedoc """ computed signature """ @type signature :: binary @p 57_896_044_618_658_097_711_785_492_504_343_953_926_634_992_332_820_282_019_728_792_003_956_564_819_949 @l 7_237_005_577_332_262_213_973_186_563_042_994_240_857_116_359_379_907_606_001_950_938_285_454_250_989 @d -4_513_249_062_541_557_337_682_894_930_092_624_173_785_641_285_191_125_241_628_941_591_882_900_924_598_840_740 @i 19_681_161_376_707_505_956_807_079_304_988_542_015_446_066_515_923_890_162_744_021_073_123_829_784_752 @t254 28_948_022_309_329_048_855_892_746_252_171_976_963_317_496_166_410_141_009_864_396_001_978_282_409_984 @base {15_112_221_349_535_400_772_501_151_409_588_531_511_454_012_693_041_857_206_046_113_283_949_847_762_202, 46_316_835_694_926_478_169_428_394_003_475_163_141_307_993_866_256_225_615_783_033_603_165_251_855_960} defp xrecover(y) do xx = (y * y - 1) * inv(@d * y * y + 1) x = expmod(xx, div(@p + 3, 8), @p) x = case (x * x - xx) |> mod(@p) do 0 -> x _ -> mod(x * @i, @p) end case x |> mod(2) do 0 -> @p - x _ -> x end end defp mod(x, _y) when x == 0, do: 0 defp mod(x, y) when x > 0, do: rem(x, y) defp mod(x, y) when x < 0, do: rem(y + rem(x, y), y) # __using__ Macro generates the hash function at compile time, which allows the # hashing function to be configurable without runtime overhead use Ed25519.Hash defp hashint(m), do: m |> hash |> :binary.decode_unsigned(:little) # :crypto.mod_pow chokes on negative inputs, so we feed it positive values # only and patch up the result if necessary defp expmod(_b, 0, _m), do: 1 defp expmod(b, e, m) when b > 0 do b |> :crypto.mod_pow(e, m) |> :binary.decode_unsigned() end defp expmod(b, e, m) do i = b |> abs() |> :crypto.mod_pow(e, m) |> :binary.decode_unsigned() cond do mod(e, 2) == 0 -> i i == 0 -> i true -> m - i end end defp inv(x), do: x |> expmod(@p - 2, @p) defp edwards({x1, y1}, {x2, y2}) do x = (x1 * y2 + x2 * y1) * inv(1 + @d * x1 * x2 * y1 * y2) y = (y1 * y2 + x1 * x2) * inv(1 - @d * x1 * x2 * y1 * y2) {mod(x, @p), mod(y, @p)} end defp encodepoint({x, y}) do val = y |> band(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) |> bor((x &&& 1) <<< 255) <<val::little-size(256)>> end defp decodepoint(<<n::little-size(256)>>) do xc = n |> bsr(255) y = n |> band(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) x = xrecover(y) point = case x &&& 1 do ^xc -> {x, y} _ -> {@p - x, y} end if isoncurve(point), do: point, else: raise("Point off Edwards curve") end defp decodepoint(_), do: raise("Provided value not a key") defp isoncurve({x, y}), do: (-x * x + y * y - 1 - @d * x * x * y * y) |> mod(@p) == 0 @doc """ Returns whether a given `key` lies on the ed25519 curve. """ @spec on_curve?(key) :: boolean def on_curve?(key) do try do decodepoint(key) true rescue _error -> false end end @doc """ Sign a message If only the secret key is provided, the public key will be derived therefrom. This adds significant overhead. """ @spec signature(binary, key, key) :: signature def signature(m, sk, pk \\ nil) def signature(m, sk, nil), do: signature(m, sk, derive_public_key(sk)) def signature(m, sk, pk) do h = hash(sk) a = a_from_hash(h) r = hashint(:binary.part(h, 32, 32) <> m) bigr = r |> scalarmult(@base) |> encodepoint s = mod(r + hashint(bigr <> pk <> m) * a, @l) bigr <> <<s::little-size(256)>> end defp a_from_hash(<<h::little-size(256), _rest::binary>>) do @t254 + (h |> band(0xF3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF8)) end defp scalarmult(0, _pair), do: {0, 1} defp scalarmult(e, p) do q = e |> div(2) |> scalarmult(p) q = edwards(q, q) case e &&& 1 do 1 -> edwards(q, p) _ -> q end end defp clamp(c) do c |> band(~~~7) |> band(~~~(128 <<< (8 * 31))) |> bor(64 <<< (8 * 31)) end @doc """ validate a signed message """ @spec valid_signature?(signature, binary, key) :: boolean def valid_signature?(<<for_r::binary-size(32), s::little-size(256)>>, m, pk) when byte_size(pk) == 32 do r = decodepoint(for_r) a = decodepoint(pk) h = hashint(encodepoint(r) <> pk <> m) scalarmult(s, @base) == edwards(r, scalarmult(h, a)) end def valid_signature?(_s, _m_, _pk), do: false @doc """ Generate a secret/public key pair Returned tuple contains `{random_secret_key, derived_public_key}` """ @spec generate_key_pair :: {key, key} def generate_key_pair do secret = :crypto.strong_rand_bytes(32) {secret, derive_public_key(secret)} end @doc """ Generate a secret/public key pair from supplied secret Returned tuple contains `{secret_key, derived_public_key}` """ @spec generate_key_pair(key) :: {key, key} def generate_key_pair(secret) do {secret, derive_public_key(secret)} end @doc """ derive the public signing key from the secret key """ @spec derive_public_key(key) :: key def derive_public_key(sk) do sk |> hash |> a_from_hash |> scalarmult(@base) |> encodepoint end @doc """ Derive the x25519/curve25519 encryption key from the ed25519 signing key By converting an `EdwardsPoint` on the Edwards model to the corresponding `MontgomeryPoint` on the Montgomery model Handles either `:secret` or `:public` keys as indicated in the call May `raise` on an invalid input key or unknown atom See: https://blog.filippo.io/using-ed25519-keys-for-encryption """ @spec to_curve25519(key, atom) :: key def to_curve25519(key, which) def to_curve25519(ed_public_key, :public) do {_, y} = decodepoint(ed_public_key) u = mod((1 + y) * inv(1 - y), @p) <<u::little-size(256)>> end def to_curve25519(ed_secret_key, :secret) do <<digest32::little-size(256), _::binary-size(32)>> = :crypto.hash(:sha512, ed_secret_key) <<clamp(digest32)::little-size(256)>> end end
lib/ed25519.ex
0.867275
0.503113
ed25519.ex
starcoder
defmodule Realtime.VehiclePositions do @moduledoc """ Main entrypoint for realtime vehicle positions """ use GenServer require Logger alias Realtime.Messages.FeedMessage alias Realtime.{VehiclePositionFinder, VehiclePositionsSource} @behaviour VehiclePositionsSource def child_spec(args) do %{ id: args[:id], start: {__MODULE__, :start_link, [args]}, restart: :permanent, shutdown: 5000, type: :worker } end def start_link(args) do feed_name = Keyword.get(args, :feed_name) vehicle_positions_url = Keyword.get(args, :vehicle_positions_url) name = via_tuple(feed_name) Logger.info(fn -> "Starting VehiclePositions realtime process for #{feed_name} using url: #{vehicle_positions_url}" end) GenServer.start_link(__MODULE__, args, name: name) end @impl GenServer def init(args) do schedule_fetch(500) {:ok, %{ feed_name: args[:feed_name], vehicle_positions_url: args[:vehicle_positions_url], realtime_data: nil, last_fetched: nil }} end @impl VehiclePositionsSource def find_vehicle_position(feed_name, trip_remote_id) do case Registry.lookup(__MODULE__, feed_name) do [_ | _] -> GenServer.call( via_tuple(feed_name), {:find_vehicle_position, trip_remote_id} ) _ -> {:reply, {:error, :no_realtime_process}} end end @impl GenServer def handle_call({:find_vehicle_position, _, _}, _, %{realtime_data: nil} = state), do: {:reply, {:error, :no_data}, state} def handle_call( {:find_vehicle_position, trip_remote_id}, _, %{realtime_data: realtime_data} = state ) do case VehiclePositionFinder.find_vehicle_position(realtime_data, trip_remote_id) do nil -> {:reply, {:error, :no_position_data}, state} position -> {:reply, {:ok, position}, state} end end @impl GenServer def handle_info(:fetch_feed, state) do Logger.debug(fn -> "Updating VehiclePositions realtime info for #{state.feed_name}" end) case HTTPoison.get(state.vehicle_positions_url) do {:ok, response} -> realtime = FeedMessage.decode(response.body) old_realtime = state.realtime_data state = %{state | realtime_data: realtime, last_fetched: Timex.now()} Logger.info(fn -> "Successfully refreshed VehiclePositions realtime data for feed #{state.feed_name} at #{ inspect(state.last_fetched) }" end) case is_nil(old_realtime) || old_realtime.header.timestamp != realtime.header.timestamp do true -> Logger.info("Pushing vehicle positions event notification") notify_subscribers() false -> Logger.debug("Realtime data timestamp matches old, skipping vehicle positions event notification") end schedule_fetch(13_000) {:noreply, state} error -> Logger.error(fn -> "Failed to fetch VehiclePositions realtime data for feed #{inspect(state.feed_name)} at #{ inspect(Timex.now()) }, error: #{inspect(error)}" end) schedule_fetch(5_000) {:noreply, state} end end defp notify_subscribers do Registry.dispatch(Registry.Realtime, :vehicle_positions, fn entries -> for {pid, _} <- entries do send(pid, {:realtime, :vehicle_positions}) end end) end defp schedule_fetch(time_ms) do Process.send_after(self(), :fetch_feed, time_ms) end defp via_tuple(feed_name) do {:via, Registry, {__MODULE__, feed_name}} end end
apps/realtime/lib/realtime/vehicle_positions.ex
0.748904
0.432183
vehicle_positions.ex
starcoder
defmodule Scrub.CIP.Type do import Scrub.BinaryUtils, warn: false alias Scrub.CIP.Symbol def decode(<<0xA0, 0x02, _crc::uint, data::binary>>, %{members: members} = structure) do IO.puts("Template: #{structure.template_name}") IO.inspect(structure) Enum.reduce(members, [], fn %{name: <<"ZZZZZZZZZ", _tail::binary>>}, acc -> acc %{type: :bool, offset: offset, bit_location: location} = member, acc -> <<_offset::binary(offset, 8), host::binary(1, 8), _tail::binary>> = data offset = 7 - location <<_offset::binary(offset, 1), value::binary(1, 1), _pad::binary(location, 1)>> = host {value, _} = decode_type_data(:bool, value) [Map.put(member, :value, value) | acc] %{type: type, offset: offset, array_length: 0} = member, acc -> IO.inspect(type) <<_offset::binary(offset, 8), tail::binary()>> = data {value, _tail} = decode_type_data(type, tail) [Map.put(member, :value, value) | acc] %{type: type, offset: offset, array_length: length} = member, acc -> <<_offset::binary(offset, 8), data::binary()>> = data {values, _} = Enum.reduce(1..length, {[], data}, fn _, {values, data} -> {value, data} = decode_type_data(type, data) {[value | values], data} end) [Map.put(member, :value, values) | acc] end) |> Enum.reverse() end def decode(<<type::binary(2, 8), data::binary()>>, _t) do Symbol.type_decode(type) |> decode_type(data) end def decode("", _t) do :invalid end def decode_type(_, _, _ \\ []) def decode_type(_type, <<>>, [acc]), do: acc def decode_type(_type, <<>>, [_ | _] = acc), do: Enum.reverse(acc) def decode_type(type, data, acc) do {value, tail} = decode_type_data(type, :binary.copy(data)) decode_type(type, tail, [value | acc]) end def decode_type_data(:bool, <<0x01, tail::binary>>), do: {true, tail} def decode_type_data(:bool, <<0xFF, tail::binary>>), do: {true, tail} def decode_type_data(:bool, <<0x00, tail::binary>>), do: {false, tail} def decode_type_data(:bool, <<1::size(1), tail::binary>>), do: {true, tail} def decode_type_data(:bool, <<0::size(1), tail::binary>>), do: {false, tail} def decode_type_data(:int, <<value::int, tail::binary>>), do: {value, tail} def decode_type_data(:sint, <<value::sint, tail::binary>>), do: {value, tail} def decode_type_data(:dint, <<value::dint, tail::binary>>), do: {value, tail} def decode_type_data(:lint, <<value::lint, tail::binary>>), do: {value, tail} def decode_type_data(:usint, <<value::usint, tail::binary>>), do: {value, tail} def decode_type_data(:uint, <<value::uint, tail::binary>>), do: {value, tail} def decode_type_data(:udint, <<value::udint, tail::binary>>), do: {value, tail} def decode_type_data(:ulint, <<value::ulint, tail::binary>>), do: {value, tail} def decode_type_data(:real, <<value::real, tail::binary>>), do: {value, tail} def decode_type_data(:lreal, <<value::lreal, tail::binary>>), do: {value, tail} def decode_type_data(:string, <<length::udint, value::binary(length, 8), tail::binary>>), do: {value, tail} def decode_type_data(:dword, <<value::binary(4, 8), tail::binary()>>), do: {:binary.copy(value), tail} def decode_type_data(_, <<0x00::uint, tail::binary>>), do: {nil, tail} def decode_type_data(_type, data), do: {data, <<>>} end
lib/scrub/cip/type.ex
0.632616
0.415581
type.ex
starcoder
defmodule GGity.Geom.Rect do @moduledoc false alias GGity.{Draw, Geom, Plot} @type t() :: %__MODULE__{} @type plot() :: %Plot{} @type record() :: map() @type mapping() :: map() defstruct data: nil, mapping: nil, stat: :identity, position: :identity, key_glyph: :rect, alpha: 1, fill: "black", color: "black", size: 0, custom_attributes: nil @spec new(mapping(), keyword()) :: Geom.Rect.t() def new(mapping, options) do struct(Geom.Rect, [{:mapping, mapping} | options]) end @spec draw(Geom.Rect.t(), list(map()), plot()) :: iolist() def draw(%Geom.Rect{} = geom_rect, data, plot), do: rects(geom_rect, data, plot) defp rects(%Geom.Rect{} = geom_rect, data, %Plot{scales: scales} = plot) do scale_transforms = geom_rect.mapping |> Map.keys() |> Enum.reduce(%{}, fn aesthetic, mapped -> Map.put(mapped, aesthetic, Map.get(scales[aesthetic], :transform)) end) transforms = geom_rect |> Map.take([:alpha, :color, :fill, :size]) |> Enum.reduce(%{}, fn {aesthetic, fixed_value}, fixed -> Map.put(fixed, aesthetic, fn _value -> fixed_value end) end) |> Map.merge(scale_transforms) Enum.map(data, fn row -> rect(row, transforms, geom_rect, plot) end) end defp rect(row, transforms, geom_rect, plot) do transformed_values = [ transforms.x.(row[geom_rect.mapping.xmin]), transforms.x.(row[geom_rect.mapping.xmax]), transforms.y.(row[geom_rect.mapping.ymin]), transforms.y.(row[geom_rect.mapping.ymax]), transforms.alpha.(row[geom_rect.mapping[:alpha]]), transforms.color.(row[geom_rect.mapping[:color]]), transforms.fill.(row[geom_rect.mapping[:fill]]), transforms.size.(row[geom_rect.mapping[:size]]) ] labelled_values = Enum.zip( [:xmin, :xmax, :ymin, :ymax, :fill_opacity, :stroke, :fill, :stroke_width], transformed_values ) options = Keyword.drop(labelled_values, [:xmin, :xmax, :ymin, :ymax]) x = labelled_values[:xmin] + plot.area_padding y = (plot.width - labelled_values[:ymax]) / plot.aspect_ratio + plot.area_padding height = (labelled_values[:ymax] - labelled_values[:ymin]) / plot.aspect_ratio width = labelled_values[:xmax] - labelled_values[:xmin] custom_attributes = GGity.Layer.custom_attributes(geom_rect, plot, row) Draw.rect( [{:x, x}, {:y, y}, {:height, height}, {:width, width} | options] ++ custom_attributes ) end end
lib/ggity/geom/rect.ex
0.891298
0.646983
rect.ex
starcoder
defmodule Protobuf.Verifier do @moduledoc """ Checks whether the values used when instantiating a new protobuf struct are valid. """ import Protobuf.WireTypes alias Protobuf.{MessageProps, FieldProps, FieldOptionsProcessor} @doc """ Returns `:ok` or a tuple `{:error, <list-of-issues>}` """ @spec verify(struct) :: :ok | {:error, [String.t()]} def verify(%mod{} = struct), do: do_verify(struct, mod.__message_props__()) @spec verify(atom, struct | map) :: :ok | {:error, [String.t()]} def verify(mod, msg) do case msg do %{__struct__: ^mod} -> verify(msg) _ -> if is_map(msg) and Map.has_key?(msg, :__struct__) and mod != msg.__struct__ do {:error, ["got #{msg.__struct__} but expected #{mod}"]} else verify(mod.new!(msg)) end end end @spec do_verify(struct, MessageProps.t()) :: :ok | {:error, [String.t()]} defp do_verify(struct, %{field_props: field_props} = props) do syntax = props.syntax with {:ok, oneofs} <- oneof_actual_vals(props, struct), :ok <- verify_fields(Map.values(field_props), syntax, struct, oneofs) do if syntax == :proto2 do verify_extensions(struct) else :ok end else :ok -> :ok {:error, messages} -> {:error, messages} end end defp verify_fields(fields, syntax, struct, oneofs) do Enum.map(fields, fn %{name_atom: name, oneof: oneof} = prop -> val = if oneof do oneofs[name] else case struct do %{^name => v} -> v _ -> nil end end if skip_field?(syntax, val, prop) || skip_enum?(prop, val) do :ok else verify_field(class_field(prop), val, prop) |> wrap_error(struct, prop) end end) |> ok_or_aggregate_errors() end defp wrap_error(:ok, _struct, _prop), do: :ok defp wrap_error({:error, msg}, struct, prop) do wrapped_msg = "Error when verifying the value(s) of #{inspect(struct.__struct__)}##{prop.name_atom}: #{ msg }" {:error, wrapped_msg} end def skip_field?(_syntax, val, %{type: type, options: options} = prop) when not is_nil(options), do: FieldOptionsProcessor.skip_verify?(type, val, prop, options) def skip_field?(_, [], _), do: true def skip_field?(_, v, _) when map_size(v) == 0, do: true def skip_field?(:proto2, nil, %{optional?: true}), do: true def skip_field?(:proto3, nil, _), do: true def skip_field?(_, _, _), do: false @spec verify_field(atom, any, FieldProps.t()) :: :ok | {:error, [String.t()]} defp verify_field( :normal, val, %{type: type, repeated?: is_repeated} = prop ) do repeated_or_not(val, is_repeated, fn v -> if is_nil(prop.options) do verify_type(type, v) else FieldOptionsProcessor.verify_type(type, v, prop.options) end end) |> ok_or_aggregate_errors() end # The guard ensures that val's type matches the is_repeated or is_map parameters defp verify_field( :embedded, val, %{repeated?: is_repeated, map?: is_map, type: type} = prop ) when (is_repeated and is_list(val)) or (is_map and is_map(val)) or (not is_repeated and not is_map) do repeated = is_repeated or is_map repeated_or_not(val, repeated, fn v -> v = if is_map, do: struct(prop.type, %{key: elem(v, 0), value: elem(v, 1)}), else: v if is_nil(prop.options) do verify(type, v) else FieldOptionsProcessor.verify_type(type, v, prop.options) end end) |> ok_or_aggregate_errors() end # A catchall for params that don't match the verify_field(:embedded) guard above defp verify_field(:embedded, val, prop), do: {:error, "Got a value: #{inspect(val)} that isn't a map or list for the repeated or map field #{ prop.name_atom }"} defp repeated_or_not(val, true = _repeated, func) when is_list(val) or is_tuple(val) or is_map(val), do: Enum.map(val, func) defp repeated_or_not(_val, true = _repeated, _func), do: [{:error, "Got value for repeated or map field that wasn't a list, tuple, or map"}] defp repeated_or_not(val, false = _repeated, func), do: [func.(val)] @spec ok_or_aggregate_errors([:ok | {:error, String.t()}]) :: :ok | {:error, [String.t()]} defp ok_or_aggregate_errors([]), do: :ok defp ok_or_aggregate_errors([:ok | rest]), do: ok_or_aggregate_errors(rest) defp ok_or_aggregate_errors([{:error, message} | rest]), do: ok_or_aggregate_errors(rest, [message]) defp ok_or_aggregate_errors([], messages), do: {:error, messages} defp ok_or_aggregate_errors([:ok | rest], messages), do: ok_or_aggregate_errors(rest, messages) defp ok_or_aggregate_errors([{:error, message} | rest], messages), do: ok_or_aggregate_errors(rest, messages ++ [message]) @spec class_field(map) :: atom defp class_field(%{wire_type: wire_delimited(), embedded?: true}), do: :embedded defp class_field(_), do: :normal @spec verify_type(atom, any) :: :ok | {:error, String.t()} def verify_type(:string, n) when is_binary(n), do: :ok def verify_type(:bool, true), do: :ok def verify_type(:bool, false), do: :ok def verify_type(:float, :infinity), do: :ok def verify_type(:float, :negative_infinity), do: :ok def verify_type(:float, :nan), do: :ok def verify_type(:float, n) when is_number(n), do: :ok def verify_type(:double, :infinity), do: :ok def verify_type(:double, :negative_infinity), do: :ok def verify_type(:double, :nan), do: :ok def verify_type(:double, n) when is_number(n), do: :ok def verify_type(:bytes, n) when is_binary(n), do: :ok def verify_type(:int32, n) when is_integer(n) and n >= -0x80000000 and n <= 0x7FFFFFFF, do: :ok def verify_type(:int64, n) when is_integer(n) and n >= -0x8000000000000000 and n <= 0x7FFFFFFFFFFFFFFF, do: :ok def verify_type(:uint32, n) when is_integer(n) and n >= 0 and n <= 0xFFFFFFFF, do: :ok def verify_type(:uint64, n) when is_integer(n) and n >= 0 and n <= 0xFFFFFFFFFFFFFFFF, do: :ok def verify_type(:sint32, n) when is_integer(n) and n >= -0x80000000 and n <= 0x7FFFFFFF, do: :ok def verify_type(:sint64, n) when is_integer(n) and n >= -0x8000000000000000 and n <= 0x7FFFFFFFFFFFFFFF, do: :ok def verify_type(:fixed64, n) when is_integer(n) and n >= 0 and n <= 0xFFFFFFFFFFFFFFFF, do: :ok def verify_type(:sfixed64, n) when is_integer(n) and n >= -0x8000000000000000 and n <= 0x7FFFFFFFFFFFFFFF, do: :ok def verify_type(:fixed32, n) when is_integer(n) and n >= 0 and n <= 0xFFFFFFFF, do: :ok def verify_type(:sfixed32, n) when is_integer(n) and n >= -0x80000000 and n <= 0x7FFFFFFF, do: :ok def verify_type({:enum, type}, n) when is_atom(n) do if type.mapping() |> Map.has_key?(n) do :ok else {:error, "invalid value for enum #{type}"} end end def verify_type({:enum, type}, n) when is_integer(n) do if type.__reverse_mapping__() |> Map.has_key?(n) do :ok else {:error, "invalid value for enum #{type}"} end end # Enum failure case def verify_type({:enum, type}, _n) do {:error, "invalid value for type #{type}"} end # General failure case def verify_type(type, _n) do {:error, "invalid value for type #{type}"} end defp skip_enum?(%{type: type, options: options} = prop, value) when not is_nil(options) do FieldOptionsProcessor.skip_verify?(type, value, prop, options) end defp skip_enum?(%{type: _type}, nil), do: true defp skip_enum?(%{type: _type}, _value), do: false defp oneof_actual_vals( %{field_tags: field_tags, field_props: field_props, oneof: oneof}, struct ) do result = Enum.reduce_while(oneof, %{}, fn {field, index}, acc -> case Map.get(struct, field, nil) do {f, val} -> %{oneof: oneof} = field_props[field_tags[f]] if oneof != index do {:halt, {:error, [":#{f} doesn't belong to #{inspect(struct.__struct__)}##{field}"]}} else {:cont, Map.put(acc, f, val)} end nil -> {:cont, acc} _ -> {:halt, {:error, [ "#{inspect(struct.__struct__)}##{field} has the wrong structure: the value of a oneof field should be nil or {key, val} where key = atom of a field name inside the oneof and val = its value" ]}} end end) case result do {:error, message} -> {:error, message} successful_result -> {:ok, successful_result} end end defp verify_extensions(%mod{__pb_extensions__: pb_exts}) when is_map(pb_exts) do Enum.map(pb_exts, fn {{ext_mod, key}, val} -> case Protobuf.Extension.get_extension_props(mod, ext_mod, key) do %{field_props: prop} -> if !skip_field?(:proto2, val, prop) || !skip_enum?(prop, val) do verify_field(class_field(prop), val, prop) end _ -> :ok end end) |> ok_or_aggregate_errors() end defp verify_extensions(_), do: :ok end
lib/protobuf/verifier.ex
0.835349
0.456168
verifier.ex
starcoder
defmodule BeepBop do @moduledoc """ Manages the state machine of an `Ecto.Schema`. """ alias Ecto.Multi alias BeepBop.{Utils, Context} @doc """ Configures `BeepBop` to work with your `Ecto.Repo`. Expected keyword arguments: * `:ecto_repo` -- Since BeepBop does the routine persisting of "state", it needs to know which `Ecto.Repo` to use. """ defmacro __using__(opts) do Utils.assert_repo!(opts) quote location: :keep do import BeepBop alias Ecto.Multi def __beepbop__(:repo), do: Keyword.fetch!(unquote(opts), :ecto_repo) Module.register_attribute(__MODULE__, :from_states, accumulate: true) Module.register_attribute(__MODULE__, :to_states, accumulate: true) Module.register_attribute(__MODULE__, :event_names, accumulate: true) @before_compile BeepBop end end defmacro state_machine(schema, column, states, do: block) do name = Utils.extract_schema_name(schema, __CALLER__) {states_list, _} = Code.eval_quoted(states) Utils.assert_states!(states_list) Utils.assert_num_states!(states_list) Module.put_attribute(__CALLER__.module, :beepbop_states, states_list) quote location: :keep, bind_quoted: [ name: name, schema: schema, column: column, states: states, block: block ] do Module.eval_quoted(__MODULE__, [ metadata(name, schema, column), context_validator(schema), persist_helpers() ]) Utils.assert_schema!(schema, column) Utils.assert_states!(states) @doc """ Returns the list of defined states in this machine. """ @spec states :: [atom] def states do @beepbop_states end @doc """ Checks if given `state` is defined in this machine. """ @spec state_defined?(atom) :: boolean def state_defined?(state) do Enum.member?(@beepbop_states, state) end block end end defmacro event(event, options, callback) do quote location: :keep do transition_opts = unquote(options) Utils.assert_transition_opts!(transition_opts) event_from_states = case transition_opts do %{from: %{not: not_from}} -> Enum.reject(@beepbop_states, fn x -> x in not_from end) %{from: :any} -> @beepbop_states %{from: from} -> from end to_state = Map.get(transition_opts, :to) @from_states {unquote(event), event_from_states} @to_states {unquote(event), to_state} @event_names unquote(event) @doc """ Runs the defined callback for this event. This function was generated by the `BeepBop.event/3` macro. """ @spec unquote(event)(Context.t(), keyword) :: Context.t() def unquote(event)(context, opts \\ [persist: true]) def unquote(event)(%Context{} = context, opts) do to_state = Map.get(unquote(options), :to) if can_transition?(context, unquote(event)) do context |> unquote(callback).() |> __beepbop_try_persist(to_state, opts) else struct(context, errors: {:error, "cannot transition, bad context"}, valid?: false) end end end end def metadata(name, schema, column) do quote location: :keep, bind_quoted: [ name: name, module: schema, column: column ] do @beepbop_name name @beepbop_module module @beepbop_column column def __beepbop__(:name), do: @beepbop_name def __beepbop__(:module), do: @beepbop_module def __beepbop__(:column), do: @beepbop_column def __beepbop__(:states), do: @beepbop_states end end def context_validator(schema) do quote location: :keep do @doc """ Validates the `context` struct. Returns `true` if `context` contains a struct of type `#{@beepbop_module}` under the `:struct` key. """ @spec valid_context?(Context.t()) :: boolean def valid_context?(context) def valid_context?(%Context{ struct: %unquote(schema){}, valid?: true, state: s, multi: %Multi{} }) when is_map(s), do: true def valid_context?(_), do: false end end def persist_helpers do quote location: :keep do defp __beepbop_final_multi(multi, struct, to_state) do Multi.run(multi, :persist, fn changes -> updated_struct = Map.get(changes, @beepbop_name) || struct to = Atom.to_string(to_state) __beepbop_persist(updated_struct, to) end) end defp __beepbop_try_persist(%Context{valid?: false} = context, _, _) do context end defp __beepbop_try_persist(%Context{valid?: true} = context, to_state, opts) do %{struct: struct, multi: multi} = context final_multi = case to_state do nil -> multi _ -> __beepbop_final_multi(multi, struct, to_state) end persist? = Keyword.get(opts, :persist, true) if persist? do repo = __beepbop__(:repo) repo_opts = Keyword.get(opts, :repo_opts, []) case repo.transaction(final_multi, repo_opts) do {:ok, result} -> struct(context, multi: result) error -> struct(context, valid?: false, errors: error) end else struct(context, multi: final_multi) end end end end def persistor(module) do if Module.defines?(module, {:persist, 2}, :def) do quote location: :keep do defp __beepbop_persist(struct, to_state) do __MODULE__.persist(struct, to_state) end end else quote location: :keep do defp __beepbop_persist(struct, to_state) do {:ok, Map.put(struct, @beepbop_column, to_state)} end end end end @doc false defmacro __before_compile__(env) do events = Module.get_attribute(env.module, :event_names) from_states = Module.get_attribute(env.module, :from_states) to_states = Module.get_attribute(env.module, :to_states) states = Module.get_attribute(env.module, :beepbop_states) Utils.assert_unique_events!(events) transitions = for event <- events, into: %{} do {event, %{ from: Keyword.fetch!(from_states, event), to: Keyword.fetch!(to_states, event) }} end Utils.assert_transitions!(states, transitions) Module.put_attribute(env.module, :transitions, transitions) quote location: :keep do Module.eval_quoted(__MODULE__, persistor(__MODULE__)) def __beepbop__(:events), do: @event_names def __beepbop__(:transitions), do: @transitions @doc """ Validates the `context` struct and checks if the transition via `event` is valid. """ @spec can_transition?(Context.t(), atom) :: boolean def can_transition?(context, event) do if valid_context?(context) do state = case Map.fetch(context.struct, @beepbop_column) do :error -> nil {:ok, something} when is_binary(something) -> String.to_atom(something) {:ok, something} when is_atom(something) -> something {:ok, nil} -> nil end from_states = Keyword.fetch!(@from_states, event) state in from_states else false end end end end end
lib/beepbop.ex
0.873255
0.617686
beepbop.ex
starcoder
defmodule Vow.Function do @moduledoc """ This module contains utilities for conforming the arguments and return values of functions. """ alias Vow.ConformError @typedoc """ The options for `Vow.conform_function/1`. * `:args` - a vow for the function arguments as they were a list to be passed to `apply/2` (optional) * `:ret` - a vow for the function's return value (optional) * `:fun` - a vow of the relationship between `:args` and `:ret`, the value passed is `%{args: [conformed_arg], ret: conformed_ret}` """ @type conform_opts :: [ {:args, [Vow.t()]} | {:ret, Vow.t()} | {:fun, Vow.t()} ] @type f :: (... -> any) | mfa | {module, atom} @doc """ Conforms the execution of function `fun`, given arguments `args`, via the `conform_opts`. This will validate that all arguments conform to the `:args` vows in `conform_opts` prior to function execution, and that the return value conforms to the `:ret` vow in `conform_opts`. Both the `:args` and `:ret` options are required for the `:fun` option. """ @spec conform(f, args :: [term], conform_opts) :: {:ok, {term, %{args: term, ret: term, fun: term}}} | {:error, {:args | :ret | :fun, ConformError.t()} | {:execute, reason :: term}} def conform(fun, args, opts) do with {:args, {:ok, conformed_args}} <- {:args, conform_args(args, opts)}, {:execute, {:ok, ret}} <- {:execute, execute(fun, args)}, {:ret, {:ok, conformed_ret}} <- {:ret, conform_ret(ret, opts)}, {:fun, {:ok, conformed_fun}} <- {:fun, conform_fun(conformed_args, conformed_ret, opts)} do conformed = %{args: conformed_args, ret: conformed_ret, fun: conformed_fun} {:ok, {ret, conformed}} else {op, {:error, problems}} -> {:error, {op, problems}} end end @spec conform_args(args :: [term], conform_opts) :: {:ok, [term] | nil} | {:error, ConformError.t()} defp conform_args(args, opts) do if Keyword.has_key?(opts, :args) do Vow.conform(Keyword.get(opts, :args), args) else {:ok, nil} end end @spec conform_ret(ret :: term, conform_opts) :: {:ok, term | nil} | {:error, ConformError.t()} defp conform_ret(ret, opts) do if Keyword.has_key?(opts, :ret) do Vow.conform(Keyword.get(opts, :ret), ret) else {:ok, nil} end end @spec conform_fun(conformed_args :: term, conformed_ret :: term, conform_opts) :: {:ok, term | nil} | {:error, ConformError.t()} defp conform_fun(conformed_args, conformed_ret, opts) do if has_all_keys?(opts, [:args, :ret, :fun]) do Vow.conform( Keyword.get(opts, :fun), %{args: conformed_args, ret: conformed_ret} ) else {:ok, nil} end end @spec has_all_keys?(keyword, [atom]) :: boolean defp has_all_keys?(keyword, keys) do Enum.all?(keys, &Keyword.has_key?(keyword, &1)) end @spec execute(f, args :: [term]) :: {:ok, result :: term} | {:error, reason :: term} defp execute(fun, args) do {:ok, execute!(fun, args)} rescue reason -> {:error, reason} catch caught -> {:error, caught} end @spec execute!(f, args :: [term]) :: term defp execute!({m, f, _}, args), do: execute!({m, f}, args) defp execute!({m, f}, args), do: apply(m, f, args) defp execute!(fun, args) when is_function(fun), do: fun.(args) end
lib/vow/function.ex
0.839997
0.638356
function.ex
starcoder
defmodule Exvalidate.Rules.Type do @moduledoc """ This validation check the type of variable. The checked types are the next: - :string => "name", "address", "language", - :list => ["name", "address", "language"], - :map => %{name: "Vegeta", address: "Vegeta planet"}, - :tuple => {:name, "Vegeta"}, - :number => 23, 2.3, 4, 0.9, -0.9, -4, - :integer => 23, 4, -4, - :float => 2.3, 0.9, -0.9. - :atom => :vegeta, :picolo - :boolean => true, false ### Examples atom ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :atom}, :Saiyajin) {:ok, :Saiyajin} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :atom}, 33) {:error, :type_value_wrong} ``` ### Examples string ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :string}, "Saiyajin") {:ok, "Boo"} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :string}, "Saiyajin") {:error, :type_value_wrong} ``` ### Examples list ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :list}, ["Saiyajin", "Namek"]) {:ok, ["Saiyajin", "Namek"]} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :list}, "Saiyajin") {:error, :type_value_wrong} ``` ### Examples map ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :map}, %{"Saiyajin" => "Vegetta"}) {:ok, %{"Saiyajin" => "Vegetta"}} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :map}, "Saiyajin") {:error, :type_value_wrong} ``` ### Examples tuple ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :tuple}, {"Saiyajin", "Namek"}) {:ok, {"Saiyajin", "Namek"}} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :tuple}, "Saiyajin") {:error, :type_value_wrong} ``` ### Examples number ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :number}, 3) {:ok, 3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :number}, 3.3) {:ok, 3.3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :number}, "3") {:ok, 3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :number}, "3.3") {:ok, 3.3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :number}, "Thr.ee") {:error, :type_value_wrong} ``` ### Examples float ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :float}, 3.3) {:ok, 3.3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :float}, "3.3") {:ok, 3.3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :float}, "Vegeta") {:error, :type_value_wrong} ``` ### Examples integer ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :integer}, 3) {:ok, 3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :integer}, "3") {:ok, 3} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :integer}, "Vegeta") {:error, :type_value_wrong} ``` ### Examples boolean ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, true) {:ok, true} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, false) {:ok, false} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "true") {:ok, true} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "false") {:ok, false} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "TRUE") {:ok, true} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "FALSE") {:ok, false} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "1") {:ok, true} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "0") {:ok, false} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, 1) {:ok, true} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, 0) {:ok, false} ``` ``` iex(3)> Exvalidate.Rules.Type.validating({:type, :boolean}, "Vegeta") {:error, :type_value_wrong} ``` The :number type includes the types :float and :integer. """ use Exvalidate.Rules.IRules def validating({:type, type}, value) when is_atom(type) do case is_this_type(type, value) do {:ok, :valid} -> {:ok, value} {:ok, :not_valid} -> {:error, :type_value_wrong} {:ok, typed_value} -> {:ok, typed_value} {:error, msg} -> {:error, msg} end end def validating(_, _), do: {:error, :type_rule_wrong} defp is_this_type(:atom, value) when is_atom(value), do: {:ok, :valid} defp is_this_type(:atom, value) when is_binary(value) do {:ok, String.to_atom(value)} end defp is_this_type(:atom, _value), do: {:ok, :not_valid} defp is_this_type(:string, value) when is_binary(value), do: {:ok, :valid} defp is_this_type(:string, _value), do: {:ok, :not_valid} defp is_this_type(:list, value) when is_list(value), do: {:ok, :valid} defp is_this_type(:list, _value), do: {:ok, :not_valid} defp is_this_type(:map, value) when is_map(value), do: {:ok, :valid} defp is_this_type(:map, _value), do: {:ok, :not_valid} defp is_this_type(:tuple, value) when is_tuple(value), do: {:ok, :valid} defp is_this_type(:tuple, _value), do: {:ok, :not_valid} defp is_this_type(:boolean, value) when is_boolean(value), do: {:ok, :valid} defp is_this_type(:boolean, value) when is_binary(value) do case String.downcase(value) do "0" -> {:ok, false} "1" -> {:ok, true} "false" -> {:ok, false} "true" -> {:ok, true} _ -> {:ok, :not_valid} end end defp is_this_type(:boolean, 0), do: {:ok, false} defp is_this_type(:boolean, 1), do: {:ok, true} defp is_this_type(:boolean, _value), do: {:ok, :not_valid} defp is_this_type(:number, value) when is_number(value), do: {:ok, :valid} defp is_this_type(:number, value) when is_binary(value) do if String.contains?(value, ".") do is_this_type(:float, value) else is_this_type(:integer, value) end end defp is_this_type(:number, _value), do: {:ok, false} defp is_this_type(:integer, value) when is_integer(value), do: {:ok, :valid} defp is_this_type(:integer, value) when is_binary(value) do case Integer.parse(value) do {num, ""} -> {:ok, num} _ -> {:ok, :not_valid} end end defp is_this_type(:integer, _value), do: {:ok, :not_valid} defp is_this_type(:float, value) when is_float(value), do: {:ok, :valid} defp is_this_type(:float, value) when is_binary(value) do case Float.parse(value) do {num, ""} -> {:ok, num} _ -> {:ok, :not_valid} end end defp is_this_type(:float, _value), do: {:ok, :not_valid} defp is_this_type(_type, _value) do {:error, :type_value_is_not_supported} end end
lib/workflow/rules/type.ex
0.849488
0.92597
type.ex
starcoder
defmodule Mix.Tasks.Bench.Cmp do use Mix.Task @shortdoc "Compare benchmark snapshots" @moduledoc """ ## Usage mix bench.cmp [options] <snapshot>... A snapshot is the output of a single run of `mix bench`. If no arguments are given, bench.cmp will try to read one or two latest snapshots from the bench/snapshots directory. When given one snapshot, `mix bench.cmp` will pretty-print the results. Giving `-` instead of a file name will make bench.cmp read from standard input. When given two or more snapshots, it will pretty-print the comparison between the first and the last one. ## Options -f <fmt>, --format=<fmt> Which format to use for the deltas when pretty-printing. One of: ratio, percent. """ alias Benchfella.Snapshot alias Benchfella.CLI.Util def run(args) do switches = [format: :string] aliases = [f: :format] {snapshots, options} = case OptionParser.parse(args, strict: switches, aliases: aliases) do {opts, [], []} -> {Util.locate_snapshots(), opts} {opts, snapshots, []} -> {snapshots, opts} {_, _, [{opt, val}|_]} -> valstr = if val do "=#{val}" end Mix.raise "Invalid option: #{opt}#{valstr}" end |> normalize_options() case snapshots do [snapshot] -> pretty_print(snapshot) [first|rest] -> last = List.last(rest) compare(first, last, Map.get(options, :format, :ratio)) end end defp normalize_options({snapshots, options}) do options = Enum.reduce(options, %{}, fn {:format, fmt}, acc -> Map.put(acc, :format, parse_pretty_format(fmt)) end) {snapshots, options} end defp parse_pretty_format("ratio"), do: :ratio defp parse_pretty_format("percent"), do: :percent defp parse_pretty_format(other), do: Mix.raise "Undefined pretty format: #{other}" defp pretty_print("-") do Util.read_all_input() |> Snapshot.parse |> Snapshot.pretty_print end defp pretty_print(path) do IO.puts "#{path}\n" path |> File.read! |> Snapshot.parse |> Snapshot.pretty_print end defp compare(path1, path2, format) do IO.puts "#{path1} vs\n#{path2}\n" snapshot1 = File.read!(path1) |> Snapshot.parse() snapshot2 = File.read!(path2) |> Snapshot.parse() {diffs, leftover} = Snapshot.compare(snapshot1, snapshot2, format) max_len = Enum.reduce(diffs, 0, fn {name, _}, len -> max(len, String.length(name)) end) diffs |> Enum.sort(fn {_, diff1}, {_, diff2} -> diff1 < diff2 end) |> Enum.each(fn {name, diff} -> :io.format('~*.s ', [-max_len-1, name<>":"]) color = choose_color(diff, format) if format == :percent do diff = Snapshot.format_percent(diff) end colordiff = IO.ANSI.format color ++ ["#{diff}"] IO.puts colordiff end) unless leftover == [] do # FIXME: when more than 2 snapshots are given, this wording may be imprecise IO.puts "\nThese tests appeared only in one of the snapshots:" Enum.each(leftover, fn x -> IO.write " "; IO.puts x end) end end defp choose_color(diff, :ratio) do cond do diff < 1.0 -> [:green] diff > 1.0 -> [:red] true -> [] end end defp choose_color(diff, :percent) do cond do diff < 0 -> [:green] diff > 0 -> [:red] true -> [] end end end
lib/mix/tasks/bench_cmp.ex
0.752922
0.423995
bench_cmp.ex
starcoder
defmodule BloomList do @moduledoc """ A behaviour for implementing bloomfilter module. The `BloomList` behaviour is a implementation of the `GenServer` behaviour which will keep the bloomfilter in state of `GenServer`. Support some bloomfilter operations: * initialize: initial bloomfilter through call `init_bloom_data` callback * add: add `key(s)` to bloomfilter, and call `handle_add_single` or `handle_add_list` * delete: only call `handle_delete` callback rather than delete `key` from bloomfilter As we know, bloomfilter only can check one key `must not exist`, can't ensure one key must exist. So, if return `true` from bloomfilter through `member?` function, the `BloomList` can not return `true` directly to caller. Thus, `BloomList` will call `handle_maybe_exist` callback function to double check if the key really in bloomlist. An example `BloomList` module: defmodule BloomList.Test.BlackList do @moduledoc false use BloomList def start_link(_) do BloomList.start_link(__MODULE__, nil, name: __MODULE__, bloom_options: [capacity: 2000, error: 0.7] ) end def reinit(data_list \\ []) do BloomList.reinit_bloom_data(__MODULE__, data_list) end def add(key) do BloomList.add(__MODULE__, key) end def add_list(key_list) do BloomList.add_list(__MODULE__, key_list) end def delete(key) do BloomList.delete(__MODULE__, key) end def member?(key) do BloomList.member?(__MODULE__, key) end # callback def init_bloom_data(_) do data_list = [1, 2, 3, 4, 5] {data_list, %{data_list: data_list}} end # callback def handle_reinit_bloom_data([], _) do data_list = [2, 3, 4, 5, 6, 7] {data_list, %{data_list: data_list}} end def handle_reinit_bloom_data(data_list, _) do {data_list, %{data_list: data_list}} end # callback def handle_maybe_exist(key, %{data_list: data_list}) do Enum.member?(data_list, key) end # callback def handle_delete(key, %{data_list: data_list} = state) do %{state | data_list: List.delete(data_list, key)} end # callback def handle_add_single(key, %{data_list: data_list} = state) do %{state | data_list: [key | data_list]} end # callback def handle_add_list(key_list, %{data_list: data_list} = state) do %{state | data_list: key_list ++ data_list} end # __end_of_module__ end The example above realized a `blacklist` module which follows a common pattern. """ use GenServer @doc """ Called when the bloomlist first started. The params for this callback is passed from `start_link/3`. Returning `{[any], any}` the first element is a list, which will be put into bloomfilter as members to inital bloomfilter. And the second element is the custom state. """ @callback init_bloom_data(any) :: {[any], any} @doc """ Called the the bloomlist want to reinit bloomfilter members. The first param for this function is `data_list` which from the second param of `reinit_bloom_data/2`. def reinit_bloom_data(bloom_name, data_list) do GenServer.call(bloom_name, {:reinit_bloom_data, data_list}) end As the above function, the `data_list` will be passed to `handle_reinit_bloom_data/2` callback as first param. The second param is the custom state. Returning `{[any], any}` is the new bloomfilter data and new custom state. """ @callback handle_reinit_bloom_data([any], any) :: {[any], any} @doc """ Called after get `true` when check one key if member of bloomfilter. The first param is the key from `member?/2` and the second one is the custom state. Returning the boolean value. """ @callback handle_maybe_exist(any, any) :: boolean() @doc """ Called when add single key to bloomlist. The first param is the new key to add, and the second one is custom state. Returning the new custom state. """ @callback handle_add_single(any, any) :: any @doc """ Called when add a key list to bloomlist. The first param is the key list to add, and second one is the custom state. Returning the new custom state. """ @callback handle_add_list([any], any) :: any @doc """ Called when delete one key from bloomlist. `bloomfilter` not supported delete operation this callback just only for the bloomlist to update custom state. Returning the new custom state. """ @callback handle_delete(any, any) :: any defmacro __using__(_) do quote location: :keep do @behaviour BloomList @doc false def child_spec(opts) do %{ id: __MODULE__, start: {__MODULE__, :start_link, [opts]}, type: :worker, restart: :permanent, shutdown: 5000 } end @doc false def handle_reinit_bloom_data(data_list, state) do {data_list, state} end @doc false def handle_maybe_exist(_, _) do true end @doc false def handle_add_single(_, state) do state end @doc false def handle_add_list(_, state) do state end @doc false def handle_delete(_, state) do state end defoverridable handle_maybe_exist: 2, handle_add_single: 2, handle_add_list: 2, handle_delete: 2, handle_reinit_bloom_data: 2, child_spec: 1 end end @doc """ Start a bloomlist process linked to the current process. This function is used to start a `BloomList` process in a supervision tree, which will execute `GenServer.start_link/3` to start one real `GenServer` process.Then the init/1 callback for `GenServer` in this module will be executed and will keep one bloomfilter data block in its state. When `init/1` callback in this module execute, the `init_bloom_data/1` function in callback module will be executed. """ @spec start_link(module, any, Keyword.t()) :: GenServer.on_start() def start_link(mod, args, options) do name = Keyword.fetch!(options, :name) GenServer.start_link(__MODULE__, {mod, options, args}, name: name) end @doc """ Reinit bloom data for one bloomlist. """ @spec reinit_bloom_data(atom, [any]) :: :ok def reinit_bloom_data(bloom_name, data_list) do GenServer.call(bloom_name, {:reinit_bloom_data, data_list}) end @doc """ Check the key if member bloomlist. """ @spec member?(atom, any) :: boolean() def member?(bloom_name, key) do bloom_ets_list = :ets.tab2list(generate_ets_table_name(bloom_name)) mod = Keyword.get(bloom_ets_list, :mod) custom_state = Keyword.get(bloom_ets_list, :custom_state) Bloomex.member?(Keyword.get(bloom_ets_list, :bloom), key) and apply(mod, :handle_maybe_exist, [key, custom_state]) end @doc """ Check the key if member bloomlist using sync mode, it will execute by `GenServer` process serially. """ @spec sync_member?(atom, any) :: boolean() def sync_member?(bloom_name, key) do GenServer.call(bloom_name, {:member?, key}) end @doc """ Add one key to bloomlist. """ @spec add(atom, any) :: :ok def add(bloom_name, key) do GenServer.call(bloom_name, {:add, key}) end @doc """ Add key list to bloomlist. """ @spec add_list(atom, [any]) :: :ok def add_list(bloom_name, key_list) do GenServer.call(bloom_name, {:add_list, key_list}) end @doc """ Delete key from bloomlist. """ @spec delete(atom, any) :: :ok def delete(bloom_name, key) do GenServer.call(bloom_name, {:delete, key}) end @doc false def init({mod, options, args}) do bloom_name = Keyword.fetch!(options, :name) bloom_options = Keyword.get(options, :bloom_options, []) bloom_ets = generate_ets_table_name(bloom_name) _ = :ets.new(bloom_ets, [:named_table, :set, :public, {:read_concurrency, true}]) {data_list, custom_state} = mod.init_bloom_data(args) bloom = bloom_options |> init_empty_bloom() |> batch_add_data(data_list) :ets.insert(bloom_ets, [{:bloom, bloom}, {:mod, mod}, {:custom_state, custom_state}]) {:ok, %{ bloom: bloom, mod: mod, bloom_ets: bloom_ets, custom_state: custom_state, bloom_options: bloom_options }} end @doc false def handle_call( {:reinit_bloom_data, data_list}, _from, %{ bloom_ets: bloom_ets, mod: mod, bloom_options: bloom_options, custom_state: custom_state } = state ) do {data_list, custom_state} = mod.handle_reinit_bloom_data(data_list, custom_state) bloom = bloom_options |> init_empty_bloom() |> batch_add_data(data_list) :ets.insert(bloom_ets, [{:bloom, bloom}, {:custom_state, custom_state}]) {:reply, :ok, %{state | bloom: bloom, custom_state: custom_state}} end @doc false def handle_call( {:member?, key}, _from, %{bloom: bloom, mod: mod, custom_state: custom_state} = state ) do res = Bloomex.member?(bloom, key) and apply(mod, :handle_maybe_exist, [key, custom_state]) {:reply, res, state} end @doc false def handle_call( {:delete, key}, _from, %{bloom_ets: bloom_ets, custom_state: custom_state, mod: mod} = state ) do new_custom_state = mod.handle_delete(key, custom_state) :ets.insert(bloom_ets, {:custom_state, new_custom_state}) {:reply, :ok, %{state | custom_state: new_custom_state}} end @doc false def handle_call( {:add, key}, _from, %{bloom: bloom, bloom_ets: bloom_ets, custom_state: custom_state, mod: mod} = state ) do new_bloom = Bloomex.add(bloom, key) new_custom_state = mod.handle_add_single(key, custom_state) :ets.insert(bloom_ets, [{:bloom, new_bloom}, {:custom_state, new_custom_state}]) {:reply, :ok, %{state | bloom: new_bloom, custom_state: new_custom_state}} end @doc false def handle_call( {:add_list, key_list}, _from, %{bloom: bloom, bloom_ets: bloom_ets, custom_state: custom_state, mod: mod} = state ) do new_bloom = batch_add_data(bloom, key_list) new_custom_state = mod.handle_add_list(key_list, custom_state) :ets.insert(bloom_ets, [{:bloom, new_bloom}, {:custom_state, new_custom_state}]) {:reply, :ok, %{state | bloom: new_bloom, custom_state: new_custom_state}} end @doc false defp generate_ets_table_name(bloom_name) do String.to_atom("BloomList.#{bloom_name}") end @doc false defp init_empty_bloom(bloom_options) do capacity = Keyword.get(bloom_options, :capacity, 1000) error = Keyword.get(bloom_options, :error, 0.3) Bloomex.plain(capacity, error) end @doc false defp batch_add_data(bloom, data_list) do Enum.reduce(data_list, bloom, fn data, bloom -> Bloomex.add(bloom, data) end) end # __end_of_module__ end
lib/bloom_list.ex
0.858763
0.622832
bloom_list.ex
starcoder
defmodule LetterLinesElixir.BoardState do @moduledoc false alias LetterLinesElixir.BoardState alias LetterLinesElixir.BoardWord defstruct [:width, :height, :words] # Guard to check for adjacent characters defguard off_by_one(n1, n2) when (n1 - n2) in [-1, 1] @type t :: %BoardState{ width: integer(), height: integer(), words: [%BoardWord{}] } def new(words) do {max_x, max_y} = BoardWord.get_max_size(words) for x <- 0..max_x, y <- 0..max_y do _ = do_get_letter_at(words, x, y) end if !Enum.any?(0..max_x, fn x -> do_get_letter_at(words, x, 0) != :none end) do raise "No letters found in first row" end if !Enum.any?(0..max_y, fn y -> do_get_letter_at(words, 0, y) != :none end) do raise "No letters found in first column" end Enum.each(words, &check_word_end_touching(&1, words)) Enum.each(words, &check_parallel_touching(&1, words)) # Add one to max to handle zero based layout %BoardState{ width: max_x + 1, height: max_y + 1, words: words } end def get_usable_letter_list(%BoardState{} = state) do %BoardWord{word: word} = longest_word(state) String.graphemes(word) end def get_letter_at(%BoardState{words: words}, x, y) do do_get_letter_at(words, x, y) end # Do not forget: test using ExUnit.CaptureIO def print_board(%BoardState{width: width, height: height} = board_state) do for y <- 0..(height - 1) do 0..(width - 1) |> Enum.map(&get_display_ascii_letter_at(board_state, &1, y)) |> Enum.join("") |> IO.puts() end end def longest_word(%BoardState{words: words}) do longest_word(words) end def longest_word([word]), do: word def longest_word([%BoardWord{word: word1} = bw1, %BoardWord{word: word2} = bw2 | tail]) do if String.length(word1) > String.length(word2) do longest_word([bw1 | tail]) else longest_word([bw2 | tail]) end end def get_display_letter_at(%BoardState{} = board_state, x, y) do letter = BoardState.get_letter_at(board_state, x, y) cond do letter == :none -> :none !letter_revealed?(board_state, x, y) -> :hidden true -> letter end end def reveal_word(%BoardState{words: words} = board_state, word) do words |> Enum.map(fn %BoardWord{word: ^word} = board_word -> %BoardWord{board_word | revealed?: true} board_word -> board_word end) |> case do ^words -> {:error, :nothing_revealed} new_words -> {:ok, %BoardState{board_state | words: new_words}} end end defp letter_revealed?(%BoardState{words: words}, x, y) do words |> Enum.reject(&(BoardWord.get_letter_at(&1, x, y) == :none)) |> Enum.filter(& &1.revealed?) |> Kernel.!=([]) end defp do_get_letter_at(words, x, y) do words |> Enum.map(&BoardWord.get_letter_at(&1, x, y)) |> Enum.reject(&(&1 == :none)) |> Enum.uniq() |> case do [] -> :none [a] -> a [_ | _] = list -> raise "Multiple letters found at #{x}, #{y}: #{inspect(list)}" end end defp get_display_ascii_letter_at(%BoardState{} = board_state, x, y) do letter = BoardState.get_letter_at(board_state, x, y) cond do letter == :none -> "#" !letter_revealed?(board_state, x, y) -> "." true -> letter end end defp check_word_end_touching(%BoardWord{direction: :h, x: x, y: y, word: word, size: size}, words) do if do_get_letter_at(words, x - 1, y) != :none do raise "Letter found before horizontal word: #{word}" end if do_get_letter_at(words, x + size, y) != :none do raise "Letter found after horizontal word: #{word}" end end defp check_word_end_touching(%BoardWord{direction: :v, x: x, y: y, word: word, size: size}, words) do if do_get_letter_at(words, x, y - 1) != :none do raise "Letter found before vertical word: #{word}" end if do_get_letter_at(words, x, y + size) != :none do raise "Letter found after vertical word: #{word}" end end defp check_parallel_touching(%BoardWord{} = word, words) do Enum.each(words, &do_check_parallel_touching(&1, word)) end defp do_check_parallel_touching(%BoardWord{direction: d1}, %BoardWord{direction: d2}) when d1 != d2, do: :ok defp do_check_parallel_touching(%BoardWord{direction: :h, y: y1}, %BoardWord{direction: :h, y: y2}) when not off_by_one(y1, y2), do: :ok defp do_check_parallel_touching(%BoardWord{direction: :v, x: x1}, %BoardWord{direction: :v, x: x2}) when not off_by_one(x1, x2), do: :ok defp do_check_parallel_touching( %BoardWord{direction: :h, y: y1, size: size1} = word1, %BoardWord{direction: :h, y: y2, size: size2} = word2 ) do if Range.disjoint?(y1..(y1 + size1), y2..(y2 + size2 - 1)) do :ok else raise "These two horizontal words are parallel and touching: #{word1.word} and #{word2.word}" end end defp do_check_parallel_touching( %BoardWord{direction: :v, x: x1, size: size1} = word1, %BoardWord{direction: :v, x: x2, size: size2} = word2 ) do if Range.disjoint?(x1..(x1 + size1 - 1), x2..(x2 + size2 - 1)) do :ok else raise "These two vertical words are parallel and touching: #{word1.word} and #{word2.word}" end end end
lib/letter_lines_elixir/board_state.ex
0.572962
0.412353
board_state.ex
starcoder
defmodule ExInterval.Interval do @moduledoc """ It implements the interval type and range operations on the type, using directed roundings intervals, recognize the input as strings and performs operations between intervals. """ defstruct inf: nil, sup: nil @type interval :: %__MODULE__{inf: Float, sup: Float} alias ExInterval.Rounding @doc """ Returns a new Interval ## Parameters - new/1 or new/2 ## Examples iex> ExInterval.Interval.new(1.1) [1.1, 1.1] iex> ExInterval.Interval.new(1.1, 2.5) [1.1, 2.5] """ @spec new(number() | binary(), number() | binary()) :: list() def new([value1, value2]), do: new(value1, value2) def new(value) do real_number = cast_to_float(value) [real_number, real_number] end def new(value1, value2) do value1 = cast_to_float(value1) value2 = cast_to_float(value2) [min(value1, value2), max(value1, value2)] end @doc """ Returns the middle point of the interval ## Parameters - middle/1 ## Examples iex> ExInterval.Interval.middle(%ExInterval.Interval{inf: -10.0, sup: 5.0}) -2.5 """ @spec middle(interval() | list()) :: float() def middle(%__MODULE__{inf: inf, sup: sup}) do backup_mode = Rounding.get_mode() Rounding.set_mode_upward() mid = (inf + sup) / 2.0 Rounding.restore_mode(backup_mode) mid end def middle([inf, sup]), do: middle(%__MODULE__{inf: inf, sup: sup}) @doc """ Returns true if the value is an element or a subset of the interval ## Parameters - is_member?/2 ## Examples iex> ExInterval.Interval.is_member?(0.0, %ExInterval.Interval{inf: -1, sup: -0.1}) false iex> ExInterval.Interval.is_member?(0.0, %ExInterval.Interval{inf: -1, sup: 1}) true iex> ExInterval.Interval.is_member?(0.0, %ExInterval.Interval{inf: 0.1, sup: 1}) false """ @spec is_member?( number() | binary() | interval(), number() | binary() | list() | interval() ) :: boolean() def is_member?(value, [inf, sup]), do: is_member?(value, %__MODULE__{inf: inf, sup: sup}) def is_member?(first, second) do [first, second] = operators(first, second) contains(first, second) end @doc """ Binary plus operator ## Parameters - add/2 ## Examples iex> ExInterval.Interval.add(%ExInterval.Interval{inf: 0.25, sup: 0.5}, 2) [2.25, 2.5] iex> ExInterval.Interval.add("0.1", 0.1) [0.2, 0.2] """ @spec add( number() | binary() | interval() | list(), number() | binary() | interval() | list() ) :: list() def add([val1, val2], [val3, val4]), do: add(%__MODULE__{inf: val1, sup: val2}, %__MODULE__{inf: val3, sup: val4}) def add(first, second) do [first, second] = operators(first, second) plus(first, second) end @doc """ Binary minus operator ## Parameters - sub/2 ## Examples iex> ExInterval.Interval.sub(%ExInterval.Interval{inf: 0.25, sup: 0.5}, 2) [-1.75, -1.5] iex> ExInterval.Interval.sub(%ExInterval.Interval{inf: -0.75, sup: 0.75}, "2") [-2.75, -1.25] iex> ExInterval.Interval.sub("0.1", 0.1) [0.0, 0.0] """ @spec sub( number() | binary() | interval() | list(), number() | binary() | interval() | list() ) :: list() def sub([val1, val2], [val3, val4]), do: sub(%__MODULE__{inf: val1, sup: val2}, %__MODULE__{inf: val3, sup: val4}) def sub(first, second) do [first, second] = operators(first, second) minus(first, second) end @doc """ Multiplication operator ## Parameters - mul/2 ## Examples iex> ExInterval.Interval.mul(%ExInterval.Interval{inf: 0.25, sup: 0.5}, %ExInterval.Interval{inf: 2.0, sup: 3.0}) [0.5, 1.5] iex> ExInterval.Interval.mul(%ExInterval.Interval{inf: -0.75, sup: 0.75}, "2") [-1.5, 1.5] iex> ExInterval.Interval.mul(0.2, "0.1") [0.02, 0.020000000000000004] """ @spec mul( number() | binary() | interval() | list(), number() | binary() | interval() | list() ) :: list() def mul([val1, val2], [val3, val4]), do: mul(%__MODULE__{inf: val1, sup: val2}, %__MODULE__{inf: val3, sup: val4}) def mul(first, second) do [first, second] = operators(first, second) multiplication(first, second) end @doc """ Division operator ## Parameters - division/2 ## Examples iex> ExInterval.Interval.division(%ExInterval.Interval{inf: 0.25, sup: 0.5}, %ExInterval.Interval{inf: 2, sup: 4}) [0.0625, 0.25] iex> ExInterval.Interval.division(%ExInterval.Interval{inf: -0.75, sup: 0.75}, 2) [-0.375, 0.375] iex> ExInterval.Interval.division("0.1", 0.1) [1.0, 1.0] """ @spec division( number() | binary() | interval() | list(), number() | binary() | interval() | list() ) :: list() def division([val1, val2], [val3, val4]), do: div_int(%__MODULE__{inf: val1, sup: val2}, %__MODULE__{inf: val3, sup: val4}) def division(first, second) do [first, second] = operators(first, second) div_int(first, second) end @doc """ Calculates the machine epsilon ## Parameters - eps/0 ## Examples iex> ExInterval.Interval.eps 2.220446049250313e-16 """ @spec eps() :: number def eps, do: calculate_epsilon(1) defp calculate_epsilon(macheps) do case 1.0 + macheps / 2 > 1.0 do true -> calculate_epsilon(macheps / 2) false -> macheps end end @doc """ Calculates the absolute value of the interval ## Parameters - absolute/1 ## Examples iex> ExInterval.Interval.absolute([-1, 1]) 1.0 """ @spec absolute(interval() | list()) :: number() def absolute(%__MODULE__{inf: inf, sup: sup}) do max(abs(cast_to_float(inf)), abs(cast_to_float(sup))) end def absolute([inf, sup]), do: absolute(%__MODULE__{inf: inf, sup: sup}) @doc """ Returns the distance between supremum and infimum of the interval ## Parameters - diameter/1 ## Examples iex> ExInterval.Interval.diameter([-10, 1]) 11.0 """ @spec diameter(interval() | list()) :: number() def diameter(%__MODULE__{inf: inf, sup: sup}) do rounding_mode_backup = Rounding.get_mode() Rounding.set_mode_upward() diameter = cast_to_float(sup) - cast_to_float(inf) Rounding.restore_mode(rounding_mode_backup) diameter end def diameter([inf, sup]), do: diameter(%__MODULE__{inf: inf, sup: sup}) @doc """ Return the square root of the interval ## Parameters - sqrt/1 ## Examples iex> ExInterval.Interval.sqrt([9, 25]) [3.0, 5.0] """ @spec sqrt(interval() | list()) :: interval() def sqrt(%__MODULE__{inf: inf, sup: sup}) do backup_mode = Rounding.get_mode() Rounding.set_mode_downward() inf = :math.sqrt(inf) Rounding.set_mode_upward() sup = :math.sqrt(sup) Rounding.restore_mode(backup_mode) new(inf, sup) end def sqrt([inf, sup]), do: sqrt(%__MODULE__{inf: inf, sup: sup}) ## private defp contains(%__MODULE__{inf: x2, sup: y2}, %__MODULE__{inf: x1, sup: y1}) do x1 <= x2 and y1 >= y2 end defp operators(%__MODULE__{} = first, %__MODULE__{} = second), do: [first, second] defp operators(%__MODULE__{} = first, second), do: [first, create_interval(second)] defp operators(first, %__MODULE__{} = second), do: [create_interval(first), second] defp operators(first, second), do: [create_interval(first), create_interval(second)] defp create_interval(value) do real_number = cast_to_float(value) %__MODULE__{inf: real_number, sup: real_number} end defp div_int(%__MODULE__{inf: x1, sup: y1}, %__MODULE__{inf: x2, sup: y2}) do backup_mode = Rounding.get_mode() Rounding.set_mode_downward() inf = min(min(x1 / x2, x1 / y2), min(y1 / x2, y1 / y2)) Rounding.set_mode_upward() sup = max(max(x1 / x2, x1 / y2), max(y1 / x2, y1 / y2)) Rounding.restore_mode(backup_mode) new(inf, sup) end defp multiplication(%__MODULE__{inf: x1, sup: y1}, %__MODULE__{inf: x2, sup: y2}) do backup_mode = Rounding.get_mode() Rounding.set_mode_downward() inf = min(min(x1 * x2, x1 * y2), min(y1 * x2, y1 * y2)) Rounding.set_mode_upward() sup = max(max(x1 * x2, x1 * y2), max(y1 * x2, y1 * y2)) Rounding.restore_mode(backup_mode) new(inf, sup) end defp minus(%__MODULE__{} = first, %__MODULE__{} = second) do backup_mode = Rounding.get_mode() Rounding.set_mode_downward() inf = first.inf - second.inf Rounding.set_mode_upward() sup = first.sup - second.sup Rounding.restore_mode(backup_mode) new(inf, sup) end defp plus(%__MODULE__{} = first, %__MODULE__{} = second) do backup_mode = Rounding.get_mode() Rounding.set_mode_downward() inf = first.inf + second.inf Rounding.set_mode_upward() sup = first.sup + second.sup Rounding.restore_mode(backup_mode) new(inf, sup) end defp cast_to_float(value) when is_float(value), do: value defp cast_to_float(value) when is_integer(value) or is_binary(value) do Float.parse("#{value}") |> Kernel.elem(0) end end
lib/ex_interval/interval.ex
0.939874
0.607896
interval.ex
starcoder
defmodule Day18 do def part_one(input) do input |> sum_input() |> magnitude() end def part_two(input) do 18 |> input.contents_of(:stream) |> Enum.map(&String.trim/1) |> Enum.map(&parse/1) |> permutations() |> Enum.map(fn [a, b] -> add(a, b) end) |> Enum.map(&magnitude/1) |> Enum.max() end def permutations(list) do for h <- list, t <- list -- [h], do: [h, t] end def magnitude(l) when is_binary(l), do: l |> parse() |> magnitude() def magnitude(l), do: l |> do_magnitude() |> elem(0) defp do_magnitude(["[" | rest]) do {left_mag, rest} = do_magnitude(rest) {right_mag, rest} = do_magnitude(rest) {3 * left_mag + 2 * right_mag, rest} end defp do_magnitude(["]" | rest]), do: do_magnitude(rest) defp do_magnitude([n | rest]), do: {n, rest} def sum_input(input) do 18 |> input.contents_of(:stream) |> Enum.map(&String.trim/1) |> sum() end def sum(list) do list |> Enum.map(&parse/1) |> Enum.reduce(fn a, b -> add(b, a) end) end def add(a, b) when is_binary(a) and is_binary(b), do: add(parse(a), parse(b)) def add(a, b), do: (["["] ++ a ++ b ++ ["]"]) |> reduce() def reduce(l) when is_binary(l), do: l |> parse() |> reduce() def reduce(l) do reduced = l |> explode() |> split() if l == reduced do reduced else reduce(reduced) end end def explode(l) when is_binary(l), do: l |> parse() |> explode() def explode(l), do: do_explode(l, [], 0, 0) def split(l), do: do_split(l, []) defp do_split([], result), do: Enum.reverse(result) defp do_split([n | rest], result) when is_number(n) and n >= 10, do: Enum.reverse(result) ++ ["[", div(n, 2), div(n, 2) + rem(n, 2), "]" | rest] defp do_split([n | rest], result), do: do_split(rest, [n | result]) defp do_explode([], exploded, 0, _prev), do: Enum.reverse(exploded) defp do_explode(["[" | rest], exploded, depth, p), do: do_explode(rest, ["[" | exploded], depth + 1, p) defp do_explode(["]" | rest], exploded, depth, p), do: do_explode(rest, ["]" | exploded], depth - 1, p) defp do_explode([a, b, "]" | rest], ["[" | exploded], 5, p), do: do_explode(rest, [0 | backfill(exploded, a + p, [])], 4, b) defp do_explode([c | rest], exploded, depth, prev), do: do_explode(rest, [c + prev | exploded], depth, 0) defp backfill([], _a, backfilled), do: Enum.reverse(backfilled) defp backfill([n | rest], a, backfilled) when is_number(n), do: Enum.reverse(backfilled) ++ [n + a | rest] defp backfill([c | rest], a, backfilled), do: backfill(rest, a, [c | backfilled]) def parse(l) do l |> String.split("", trim: true) |> Enum.reject(fn c -> c == "," end) |> Enum.map(fn "[" -> "[" "]" -> "]" c -> String.to_integer(c) end) end end
year_2021/lib/day_18.ex
0.655887
0.505981
day_18.ex
starcoder
defmodule Livebook.Notebook.Explore do @moduledoc false defmodule NotFoundError do @moduledoc false defexception [:slug, plug_status: 404] def message(%{slug: slug}) do "could not find an example notebook matching #{inspect(slug)}" end end @type notebook_info :: %{ slug: String.t(), livemd: String.t(), title: String.t(), description: String.t(), cover_url: String.t(), images: images() } @type images :: %{String.t() => binary()} infos = [ %{ path: Path.join(__DIR__, "explore/intro_to_livebook.livemd"), description: "Get to know Livebook, see how it works and explore its features.", cover_url: "/images/logo.png" }, %{ path: Path.join(__DIR__, "explore/distributed_portals_with_elixir.livemd"), description: "A fast-paced introduction to the Elixir language by building distributed data-transfer portals.", cover_url: "/images/elixir-portal.jpeg", image_names: ["portal-drop.jpeg", "portal-list.jpeg"] }, %{ path: Path.join(__DIR__, "explore/elixir_and_livebook.livemd"), description: "Learn how to use some of Elixir and Livebook's unique features together.", cover_url: "/images/elixir.png" }, %{ path: Path.join(__DIR__, "explore/intro_to_vega_lite.livemd"), description: "Learn how to quickly create numerous plots for your data.", cover_url: "/images/vega_lite.png" }, %{ path: Path.join(__DIR__, "explore/intro_to_kino.livemd"), description: "Display and control rich and interactive widgets in Livebook.", cover_url: "/images/kino.png" }, %{ path: Path.join(__DIR__, "explore/intro_to_nx.livemd"), description: "Enter Numerical Elixir, experience the power of multi-dimensional arrays of numbers.", cover_url: "/images/nx.png" }, # %{ # path: Path.join(__DIR__, "explore/intro_to_axon.livemd"), # description: "Build Neural Networks in Elixir using a high-level, composable API.", # cover_url: "/images/axon.png" # }, %{ path: Path.join(__DIR__, "explore/vm_introspection.livemd"), description: "Extract and visualize information about a remote running node.", cover_url: "/images/vm_introspection.png" } ] notebook_infos = for info <- infos do path = Map.fetch!(info, :path) @external_resource path markdown = File.read!(path) # Parse the file to ensure no warnings and read the title. # However, in the info we keep just the file contents to save on memory. {notebook, []} = Livebook.LiveMarkdown.Import.notebook_from_markdown(markdown) images = info |> Map.get(:image_names, []) |> Map.new(fn image_name -> path = Path.join([Path.dirname(path), "images", image_name]) content = File.read!(path) {image_name, content} end) slug = info[:slug] || path |> Path.basename() |> Path.rootname() |> String.replace("_", "-") %{ slug: slug, livemd: markdown, title: notebook.name, description: Map.fetch!(info, :description), cover_url: Map.fetch!(info, :cover_url), images: images } end @doc """ Returns a list of example notebooks with metadata. """ @spec notebook_infos() :: list(notebook_info()) def notebook_infos(), do: unquote(Macro.escape(notebook_infos)) @doc """ Finds explore notebook by slug and returns the parsed data structure. Returns the notebook along with the images it uses as preloaded binaries. """ @spec notebook_by_slug!(String.t()) :: {Livebook.Notebook.t(), images()} def notebook_by_slug!(slug) do notebook_infos() |> Enum.find(&(&1.slug == slug)) |> case do nil -> raise NotFoundError, slug: slug notebook_info -> {notebook, []} = Livebook.LiveMarkdown.Import.notebook_from_markdown(notebook_info.livemd) {notebook, notebook_info.images} end end end
lib/livebook/notebook/explore.ex
0.811041
0.580738
explore.ex
starcoder
defmodule Ockam.SecureChannel.KeyEstablishmentProtocol.XX.Protocol do @moduledoc false alias Ockam.Vault defstruct [ # handle to a vault :vault, # identity keypair, reference in vault :s, # ephemeral keypair, reference in vault :e, # remote peer's identity public key :rs, # remote peer's ephemeral public key :re, # chaining key ck :ck, # encryption key k :k, # counter-based nonce n :n, # transcript hash, that hashes all the data that’s been sent and received. :h, # a prologue that is hashed into h :prologue, # payload for message1 :m1_payload, # payload for message2 :m2_payload, # payload for message2 :m3_payload ] @default_prologue "" @default_m1_payload "" @default_m2_payload "" @default_m3_payload "" @protocol_name "Noise_XX_25519_AESGCM_SHA256" defmacro zero_padded_protocol_name do quote bind_quoted: binding() do padding_size = (32 - byte_size(@protocol_name)) * 8 <<@protocol_name, 0::size(padding_size)>> end end def setup(options, data) do with {:ok, protocol_state} <- setup_vault(options, %__MODULE__{}), {:ok, protocol_state} <- setup_s(options, protocol_state), {:ok, protocol_state} <- setup_e(options, protocol_state), {:ok, protocol_state} <- setup_h(protocol_state), {:ok, protocol_state} <- setup_ck(protocol_state), {:ok, protocol_state} <- setup_prologe(options, protocol_state), {:ok, protocol_state} <- setup_message_payloads(options, protocol_state) do data = Map.put(data, :xx_key_establishment_state, protocol_state) {:ok, data} end end defp setup_vault(options, state) do case Keyword.get(options, :vault) do nil -> {:error, :vault_option_is_nil} vault -> {:ok, %{state | vault: vault}} end end defp setup_s(options, state) do case Keyword.get(options, :identity_keypair) do nil -> {:error, :identity_keypair_option_is_nil} %{private: _priv, public: _pub} = s -> {:ok, %{state | s: s}} vault_handle -> turn_vault_private_key_handle_to_keypair(:s, vault_handle, state) end end defp setup_e(options, state) do case Keyword.get(options, :ephemeral_keypair) do nil -> generate_e(state) %{private: _priv, public: _pub} = e -> {:ok, %{state | e: e}} vault_handle -> turn_vault_private_key_handle_to_keypair(:e, vault_handle, state) end end defp turn_vault_private_key_handle_to_keypair(s_or_e, handle, %{vault: vault} = state) do with {:ok, public_key} <- Vault.secret_publickey_get(vault, handle) do state = Map.put(state, s_or_e, %{private: handle, public: public_key}) {:ok, state} end end defp generate_e(%{vault: vault} = state) do with {:ok, private_key} <- Vault.secret_generate(vault, type: :curve25519), {:ok, public_key} <- Vault.secret_publickey_get(vault, private_key) do e = %{private: private_key, public: public_key} {:ok, %{state | e: e}} else {:error, reason} -> {:error, {:could_not_setup_e, reason}} end end defp setup_h(state) do h = zero_padded_protocol_name() {:ok, %{state | h: h}} end defp setup_ck(%{vault: vault} = state) do case Vault.secret_import(vault, [type: :buffer], zero_padded_protocol_name()) do {:ok, ck} -> {:ok, %{state | ck: ck}} {:error, reason} -> {:error, {:could_not_setup_ck, reason}} end end defp setup_prologe(options, state) do prologue = Keyword.get(options, :prologue, @default_prologue) with {:ok, state} <- mix_hash(state, prologue) do {:ok, %{state | prologue: prologue}} end end defp setup_message_payloads(options, state) do state = state |> Map.put(:m1_payload, Keyword.get(options, :message1_payload, @default_m1_payload)) |> Map.put(:m2_payload, Keyword.get(options, :message2_payload, @default_m2_payload)) |> Map.put(:m3_payload, Keyword.get(options, :message3_payload, @default_m3_payload)) {:ok, state} end def encode(message_name, %{xx_key_establishment_state: protocol_state} = data) when message_name in [:message1, :message2, :message3] do encoder = String.to_existing_atom("encode_" <> Atom.to_string(message_name)) return_value = apply(__MODULE__, encoder, [protocol_state]) case return_value do {:ok, encoded, protocol_state} -> ## TODO: optimise double encoding of binaries encoded_payload = :bare.encode(encoded, :data) {:ok, encoded_payload, %{data | xx_key_establishment_state: protocol_state}} {:error, reason} -> {:error, {:failed_to_encode, message_name, reason}} end end def decode(message_name, message_payload, %{xx_key_establishment_state: protocol_state} = data) when message_name in [:message1, :message2, :message3] do ## TODO: optimise double encoding of binaries {:ok, message, ""} = :bare.decode(message_payload, :data) decoder = String.to_existing_atom("decode_" <> Atom.to_string(message_name)) return_value = apply(__MODULE__, decoder, [message, protocol_state]) case return_value do {:ok, payload, protocol_state} -> {:ok, payload, %{data | xx_key_establishment_state: protocol_state}} {:error, reason} -> {:error, {:failed_to_decode, message_name, reason}} end end def encode_message1(%{e: e, m1_payload: payload} = state) do with {:ok, state} <- mix_hash(state, e.public), {:ok, state} <- mix_hash(state, payload) do {:ok, e.public <> payload, state} end end def encode_message2(%{e: e, s: s, re: re, m2_payload: payload} = state) do with {:ok, state} <- mix_hash(state, e.public), {:ok, shared_secret} <- dh(state, e, re), {:ok, state} <- mix_key(state, shared_secret), {:ok, state, encrypted_s_and_tag} <- encrypt_and_hash(state, s.public), {:ok, shared_secret} <- dh(state, s, re), {:ok, state} <- mix_key(state, shared_secret), {:ok, state, encrypted_payload_and_tag} <- encrypt_and_hash(state, payload) do {:ok, e.public <> encrypted_s_and_tag <> encrypted_payload_and_tag, state} end end def encode_message3(%{s: s, re: re, m3_payload: payload} = state) do with {:ok, state, encrypted_s_and_tag} <- encrypt_and_hash(state, s.public), {:ok, shared_secret} <- dh(state, s, re), {:ok, state} <- mix_key(state, shared_secret), {:ok, state, encrypted_payload_and_tag} <- encrypt_and_hash(state, payload) do {:ok, encrypted_s_and_tag <> encrypted_payload_and_tag, state} end end def decode_message1(message, state) do with {:ok, re, payload} <- parse_message1(message), {:ok, state} <- mix_hash(state, re), {:ok, state} <- mix_hash(state, payload) do {:ok, payload, %{state | re: re}} end end def decode_message2(message, %{e: e} = state) do with {:ok, re, encrypted_rs_and_tag, encrypted_payload_and_tag} <- parse_message2(message), {:ok, state} <- mix_hash(state, re), {:ok, shared_secret} <- dh(state, e, re), {:ok, state} <- mix_key(state, shared_secret), {:ok, state, rs} <- decrypt_and_hash(state, encrypted_rs_and_tag), {:ok, shared_secret} <- dh(state, e, rs), {:ok, state} <- mix_key(state, shared_secret), {:ok, state, payload} <- decrypt_and_hash(state, encrypted_payload_and_tag) do {:ok, payload, %{state | re: re, rs: rs}} end end def decode_message3(message, %{e: e} = state) do with {:ok, encrypted_rs_and_tag, encrypted_payload_and_tag} <- parse_message3(message), {:ok, state, rs} <- decrypt_and_hash(state, encrypted_rs_and_tag), {:ok, shared_secret} <- dh(state, e, rs), {:ok, state} <- mix_key(state, shared_secret), {:ok, state, payload} <- decrypt_and_hash(state, encrypted_payload_and_tag) do {:ok, payload, %{state | rs: rs}} end end def parse_message1(<<re::32-bytes, payload::binary>>), do: {:ok, re, payload} def parse_message1(message), do: {:error, {:unexpected_structure, :message1, message}} def parse_message2(<<re::32-bytes, encrypted_rs_and_tag::48-bytes, rest::binary>>) do encrypted_payload_and_tag = rest {:ok, re, encrypted_rs_and_tag, encrypted_payload_and_tag} end def parse_message2(message), do: {:error, {:unexpected_structure, :message2, message}} def parse_message3(<<encrypted_rs_and_tag::48-bytes, encrypted_payload_and_tag::binary>>), do: {:ok, encrypted_rs_and_tag, encrypted_payload_and_tag} def parse_message3(message), do: {:error, {:unexpected_structure, :message3, message}} def mix_hash(%{vault: vault, h: h} = state, value) do case Vault.sha256(vault, h <> value) do {:ok, h} -> {:ok, %{state | h: h}} error -> {:error, {:could_not_mix_hash, {state, value, error}}} end end def mix_key(%{vault: vault, ck: ck} = state, input_key_material) do ck_attributes = %{type: :buffer, length: 32, persistence: :ephemeral} k_attributes = %{type: :aes, length: 32, persistence: :ephemeral} kdf_result = Vault.hkdf_sha256(vault, ck, input_key_material, [ck_attributes, k_attributes]) with {:ok, [ck, k]} <- kdf_result do {:ok, %{state | n: 0, ck: ck, k: k}} end end def dh(%{vault: vault}, keypair, peer_public) do Vault.ecdh(vault, keypair.private, peer_public) end def encrypt_and_hash(%{vault: vault, k: k, n: n, h: h} = state, plaintext) do with {:ok, k} <- Vault.secret_export(vault, k), {:ok, k} <- Vault.secret_import(vault, [type: :aes], k), {:ok, ciphertext_and_tag} <- Vault.aead_aes_gcm_encrypt(vault, k, n, h, plaintext), :ok <- Vault.secret_destroy(vault, k), {:ok, state} <- mix_hash(state, ciphertext_and_tag) do {:ok, %{state | n: n + 1}, ciphertext_and_tag} end end def decrypt_and_hash(%{vault: vault, k: k, n: n, h: h} = state, ciphertext_and_tag) do with {:ok, k} <- Vault.secret_export(vault, k), {:ok, k} <- Vault.secret_import(vault, [type: :aes], k), {:ok, plaintext} <- Vault.aead_aes_gcm_decrypt(vault, k, n, h, ciphertext_and_tag), :ok <- Vault.secret_destroy(vault, k), {:ok, state} <- mix_hash(state, ciphertext_and_tag) do {:ok, %{state | n: n + 1}, plaintext} end end def split(%{xx_key_establishment_state: %{vault: vault, ck: ck, h: h}} = data) do k1_attributes = %{type: :aes, length: 32, persistence: :ephemeral} k2_attributes = %{type: :aes, length: 32, persistence: :ephemeral} with {:ok, [k1, k2]} <- Vault.hkdf_sha256(vault, ck, [k1_attributes, k2_attributes]) do {:ok, {k1, k2, h}, data} end end end
implementations/elixir/ockam/ockam/lib/ockam/secure_channel/key_establishment_protocol/xx/protocol.ex
0.692122
0.430925
protocol.ex
starcoder
defmodule RethinkDB.Connection do @moduledoc """ A module for managing connections. A `Connection` object is a process that can be started in various ways. It is recommended to start it as part of a supervision tree with a name: worker(RethinkDB.Connection, [[port: 28015, host: 'localhost', name: :rethinkdb_connection]]) Connections will by default connect asynchronously. If a connection fails, we retry with an exponential backoff. All queries will return `%RethinkDB.Exception.ConnectionClosed{}` until the connection is established. If `:sync_connect` is set to `true` then the process will crash if we fail to connect. It's recommended to only use this if the database is on the same host or if a rethinkdb proxy is running on the same host. If there's any chance of a network partition, it's recommended to stick with the default behavior. """ use Connection require Logger alias RethinkDB.Connection.Request alias RethinkDB.Connection.Transport @doc """ A convenience macro for naming connections. For convenience we provide the `use RethinkDB.Connection` macro, which automatically registers itself under the module name: defmodule FooDatabase, do: use RethinkDB.Connection Then in the supervision tree: worker(FooDatabase, [[port: 28015, host: 'localhost']]) When `use RethinkDB.Connection` is called, it will define: * `start_link` * `stop` * `run` All of these only differ from the normal `RethinkDB.Connection` functions in that they don't accept a connection. They will use the current module as the process name. `start_link` will start the connection under the module name. If you attempt to provide a name to `start_link`, it will raise an `ArgumentError`. """ defmacro __using__(_opts) do quote location: :keep do def start_link(opts \\ []) do if Dict.has_key?(opts, :name) && opts[:name] != __MODULE__ do # The whole point of this macro is to provide an implicit process # name, so subverting it is considered an error. raise ArgumentError.exception( "Process name #{inspect opts[:name]} conflicts with implicit name #{inspect __MODULE__} provided by `use RethinkDB.Connection`" ) end RethinkDB.Connection.start_link(Dict.put_new(opts, :name, __MODULE__)) end def run(query, opts \\ []) do RethinkDB.Connection.run(query, __MODULE__, opts) end def noreply_wait(timeout \\ 5000) do RethinkDB.Connection.noreply_wait(__MODULE__, timeout) end def stop do RethinkDB.Connection.stop(__MODULE__) end defoverridable [ start_link: 1, start_link: 0 ] end end @doc """ Stop the connection. Stops the given connection. """ def stop(pid) do Connection.cast(pid, :stop) end @doc """ Run a query on a connection. Supports the following options: * `timeout` - How long to wait for a response * `db` - Default database to use for query. Can also be specified as part of the query. * `durability` - possible values are 'hard' and 'soft'. In soft durability mode RethinkDB will acknowledge the write immediately after receiving it, but before the write has been committed to disk. * `noreply` - set to true to not receive the result object or cursor and return immediately. * `profile` - whether or not to return a profile of the query’s execution (default: false). """ def run(query, conn, opts \\ []) do timeout = Dict.get(opts, :timeout, 5000) conn_opts = Dict.drop(opts, [:timeout]) noreply = Dict.get(opts, :noreply, false) conn_opts = Connection.call(conn, :conn_opts) |> Dict.take([:db]) |> Dict.merge(conn_opts) query = prepare_and_encode(query, conn_opts) msg = case noreply do true -> {:query_noreply, query} false -> {:query, query} end case Connection.call(conn, msg, timeout) do {response, token} -> RethinkDB.Response.parse(response, token, conn) :noreply -> :ok result -> result end end @doc """ Fetch the next dataset for a feed. Since a feed is tied to a particular connection, no connection is needed when calling `next`. """ def next(%{token: token, pid: pid}) do case Connection.call(pid, {:continue, token}, :infinity) do {response, token} -> RethinkDB.Response.parse(response, token, pid) x -> x end end @doc """ Closes a feed. Since a feed is tied to a particular connection, no connection is needed when calling `close`. """ def close(%{token: token, pid: pid}) do {response, token} = Connection.call(pid, {:stop, token}, :infinity) RethinkDB.Response.parse(response, token, pid) end @doc """ `noreply_wait` ensures that previous queries with the noreply flag have been processed by the server. Note that this guarantee only applies to queries run on the given connection. """ def noreply_wait(conn, timeout \\ 5000) do {response, token} = Connection.call(conn, :noreply_wait, timeout) case RethinkDB.Response.parse(response, token, conn) do %RethinkDB.Response{data: %{"t" => 4}} -> :ok r -> r end end defp prepare_and_encode(query, opts) do query = RethinkDB.Prepare.prepare(query) # Right now :db can still be nil so we need to remove it opts = Enum.into(opts, %{}, fn {:db, db} -> {:db, RethinkDB.Prepare.prepare(RethinkDB.Query.db(db))} {k, v} -> {k, v} end) query = [1, query, opts] Poison.encode!(query) end @doc """ Start connection as a linked process Accepts a `Dict` of options. Supported options: * `:host` - hostname to use to connect to database. Defaults to `'localhost'`. * `:port` - port on which to connect to database. Defaults to `28015`. * `:auth_key` - authorization key to use with database. Defaults to `nil`. * `:db` - default database to use with queries. Defaults to `nil`. * `:sync_connect` - whether to have `init` block until a connection succeeds. Defaults to `false`. * `:max_pending` - Hard cap on number of concurrent requests. Defaults to `10000` * `:ssl` - a dict of options. Support SSL options: * `:ca_certs` - a list of file paths to cacerts. """ def start_link(opts \\ []) do args = Dict.take(opts, [:host, :port, :auth_key, :db, :sync_connect, :ssl, :max_pending]) Connection.start_link(__MODULE__, args, opts) end def init(opts) do host = case Dict.get(opts, :host, 'localhost') do x when is_binary(x) -> String.to_char_list x x -> x end sync_connect = Dict.get(opts, :sync_connect, false) ssl = Dict.get(opts, :ssl) opts = Dict.put(opts, :host, host) |> Dict.put_new(:port, 28015) |> Dict.put_new(:auth_key, "") |> Dict.put_new(:max_pending, 10000) |> Dict.drop([:sync_connect]) |> Enum.into(%{}) {transport, transport_opts} = case ssl do nil -> {%Transport.TCP{}, []} x -> {%Transport.SSL{}, Enum.map(Dict.fetch!(x, :ca_certs), &({:cacertfile, &1})) ++ [verify: :verify_peer]} end state = %{ pending: %{}, current: {:start, ""}, token: 0, config: Map.put(opts, :transport, {transport, transport_opts}) } case sync_connect do true -> case connect(:sync, state) do {:backoff, _, _} -> {:stop, :econnrefused} x -> x end false -> {:connect, :init, state} end end def connect(_info, state = %{config: %{host: host, port: port, auth_key: auth_key, transport: {transport, transport_opts}}}) do case Transport.connect(transport, host, port, [active: false, mode: :binary] ++ transport_opts) do {:ok, socket} -> case handshake(socket, auth_key) do {:error, _} -> {:stop, :bad_handshake, state} :ok -> :ok = Transport.setopts(socket, [active: :once]) # TODO: investigate timeout vs hibernate {:ok, Dict.put(state, :socket, socket)} end {:error, :econnrefused} -> backoff = min(Dict.get(state, :timeout, 1000), 64000) {:backoff, backoff, Dict.put(state, :timeout, backoff*2)} end end def disconnect(info, state = %{pending: pending}) do pending |> Enum.each(fn {_token, pid} -> Connection.reply(pid, %RethinkDB.Exception.ConnectionClosed{}) end) new_state = state |> Map.delete(:socket) |> Map.put(:pending, %{}) |> Map.put(:current, {:start, ""}) # TODO: should we reconnect? {:stop, info, new_state} end def handle_call(:conn_opts, _from, state = %{config: opts}) do {:reply, opts, state} end def handle_call(_, _, state = %{pending: pending, config: %{max_pending: max_pending}}) when map_size(pending) > max_pending do {:reply, %RethinkDB.Exception.TooManyRequests{}, state} end def handle_call({:query_noreply, query}, _from, state = %{token: token}) do new_token = token + 1 token = << token :: little-size(64) >> {:noreply, state} = Request.make_request(query, token, :noreply, %{state | token: new_token}) {:reply, :noreply, state} end def handle_call({:query, query}, from, state = %{token: token}) do new_token = token + 1 token = << token :: little-size(64) >> Request.make_request(query, token, from, %{state | token: new_token}) end def handle_call({:continue, token}, from, state) do query = "[2]" Request.make_request(query, token, from, state) end def handle_call({:stop, token}, from, state) do query = "[3]" Request.make_request(query, token, from, state) end def handle_call(:noreply_wait, from, state = %{token: token}) do query = "[4]" new_token = <PASSWORD> + 1 token = << token :: little-size(64) >> Request.make_request(query, token, from, %{state | token: new_token}) end def handle_cast(:stop, state) do {:disconnect, :normal, state}; end def handle_info({proto, _port, data}, state = %{socket: socket}) when proto in [:tcp, :ssl] do :ok = Transport.setopts(socket, [active: :once]) Request.handle_recv(data, state) end def handle_info({closed_msg, _port}, state) when closed_msg in [:ssl_closed, :tcp_closed] do {:disconnect, closed_msg, state} end def handle_info(msg, state) do Logger.debug("Received unhandled info: #{inspect(msg)} with state #{inspect state}") {:noreply, state} end def terminate(_reason, %{socket: socket}) do Transport.close(socket) :ok end def terminate(_reason, _state) do :ok end defp handshake(socket, auth_key) do :ok = Transport.send(socket, << 0x400c2d20 :: little-size(32) >>) :ok = Transport.send(socket, << :erlang.iolist_size(auth_key) :: little-size(32) >>) :ok = Transport.send(socket, auth_key) :ok = Transport.send(socket, << 0x7e6970c7 :: little-size(32) >>) case recv_until_null(socket, "") do "SUCCESS" -> :ok error = {:error, _} -> error end end defp recv_until_null(socket, acc) do case Transport.recv(socket, 1) do {:ok, "\0"} -> acc {:ok, a} -> recv_until_null(socket, acc <> a) x = {:error, _} -> x end end end
lib/rethinkdb/connection.ex
0.848329
0.531027
connection.ex
starcoder
defmodule AWS.KinesisAnalytics do @moduledoc """ Amazon Kinesis Analytics ## Overview This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). This is the *Amazon Kinesis Analytics v1 API Reference*. The Amazon Kinesis Analytics Developer Guide provides additional information. """ alias AWS.Client alias AWS.Request def metadata do %AWS.ServiceMetadata{ abbreviation: "Kinesis Analytics", api_version: "2015-08-14", content_type: "application/x-amz-json-1.1", credential_scope: nil, endpoint_prefix: "kinesisanalytics", global?: false, protocol: "json", service_id: "Kinesis Analytics", signature_version: "v4", signing_name: "kinesisanalytics", target_prefix: "KinesisAnalytics_20150814" } end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Adds a CloudWatch log stream to monitor application configuration errors. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see [Working with Amazon CloudWatch Logs](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). """ def add_application_cloud_watch_logging_option(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "AddApplicationCloudWatchLoggingOption", input, options ) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Adds a streaming source to your Amazon Kinesis application. For conceptual information, see [Configuring Application Input](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). You can add a streaming source either when you create an application or you can use this operation to add a streaming source after you create an application. For more information, see [CreateApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_CreateApplication.html). Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the [DescribeApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) operation to find the current application version. This operation requires permissions to perform the `kinesisanalytics:AddApplicationInput` action. """ def add_application_input(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "AddApplicationInput", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Adds an [InputProcessingConfiguration](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) to an application. An input processor preprocesses records on the input stream before the application's SQL code executes. Currently, the only input processor available is [AWS Lambda](https://docs.aws.amazon.com/lambda/). """ def add_application_input_processing_configuration(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "AddApplicationInputProcessingConfiguration", input, options ) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Adds an external destination to your Amazon Kinesis Analytics application. If you want Amazon Kinesis Analytics to deliver data from an in-application stream within your application to an external destination (such as an Amazon Kinesis stream, an Amazon Kinesis Firehose delivery stream, or an AWS Lambda function), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination. You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors. For more information, see [Understanding Application Output (Destination)](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html). Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the [DescribeApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) operation to find the current application version. For the limits on the number of application inputs and outputs you can configure, see [Limits](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). This operation requires permissions to perform the `kinesisanalytics:AddApplicationOutput` action. """ def add_application_output(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "AddApplicationOutput", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Adds a reference data source to an existing application. Amazon Kinesis Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in Amazon S3 object maps to columns in the resulting in-application table. For conceptual information, see [Configuring Application Input](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). For the limits on data sources you can add to your application, see [Limits](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html). This operation requires permissions to perform the `kinesisanalytics:AddApplicationOutput` action. """ def add_application_reference_data_source(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "AddApplicationReferenceDataSource", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Creates an Amazon Kinesis Analytics application. You can configure each application with one streaming source as input, application code to process the input, and up to three destinations where you want Amazon Kinesis Analytics to write the output data from your application. For an overview, see [How it Works](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html). In the input configuration, you map the streaming source to an in-application stream, which you can think of as a constantly updating table. In the mapping, you must provide a schema for the in-application stream and map each data column in the in-application stream to a data element in the streaming source. Your application code is one or more SQL statements that read input data, transform it, and generate output. Your application code can create one or more SQL artifacts like SQL streams or pumps. In the output configuration, you can configure the application to write data from in-application streams created in your applications to up to three destinations. To read data from your source stream or write data to destination streams, Amazon Kinesis Analytics needs your permissions. You grant these permissions by creating IAM roles. This operation requires permissions to perform the `kinesisanalytics:CreateApplication` action. For introductory exercises to create an Amazon Kinesis Analytics application, see [Getting Started](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/getting-started.html). """ def create_application(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CreateApplication", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Deletes the specified application. Amazon Kinesis Analytics halts application execution and deletes the application, including any application artifacts (such as in-application streams, reference table, and application code). This operation requires permissions to perform the `kinesisanalytics:DeleteApplication` action. """ def delete_application(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteApplication", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Deletes a CloudWatch log stream from an application. For more information about using CloudWatch log streams with Amazon Kinesis Analytics applications, see [Working with Amazon CloudWatch Logs](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/cloudwatch-logs.html). """ def delete_application_cloud_watch_logging_option(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DeleteApplicationCloudWatchLoggingOption", input, options ) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Deletes an [InputProcessingConfiguration](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_InputProcessingConfiguration.html) from an input. """ def delete_application_input_processing_configuration(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DeleteApplicationInputProcessingConfiguration", input, options ) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Deletes output destination configuration from your application configuration. Amazon Kinesis Analytics will no longer write data from the corresponding in-application stream to the external output destination. This operation requires permissions to perform the `kinesisanalytics:DeleteApplicationOutput` action. """ def delete_application_output(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteApplicationOutput", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Deletes a reference data source configuration from the specified application configuration. If the application is running, Amazon Kinesis Analytics immediately removes the in-application table that you created using the [AddApplicationReferenceDataSource](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_AddApplicationReferenceDataSource.html) operation. This operation requires permissions to perform the `kinesisanalytics.DeleteApplicationReferenceDataSource` action. """ def delete_application_reference_data_source(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DeleteApplicationReferenceDataSource", input, options ) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Returns information about a specific Amazon Kinesis Analytics application. If you want to retrieve a list of all applications in your account, use the [ListApplications](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_ListApplications.html) operation. This operation requires permissions to perform the `kinesisanalytics:DescribeApplication` action. You can use `DescribeApplication` to get the current application versionId, which you need to call other operations such as `Update`. """ def describe_application(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeApplication", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Infers a schema by evaluating sample records on the specified streaming source (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream) or S3 object. In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema. You can use the inferred schema when configuring a streaming source for your application. For conceptual information, see [Configuring Application Input](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html). Note that when you create an application using the Amazon Kinesis Analytics console, the console uses this operation to infer a schema and show it in the console user interface. This operation requires permissions to perform the `kinesisanalytics:DiscoverInputSchema` action. """ def discover_input_schema(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DiscoverInputSchema", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Returns a list of Amazon Kinesis Analytics applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status. If the response returns the `HasMoreApplications` value as true, you can send another request by adding the `ExclusiveStartApplicationName` in the request body, and set the value of this to the last application name from the previous response. If you want detailed information about a specific application, use [DescribeApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html). This operation requires permissions to perform the `kinesisanalytics:ListApplications` action. """ def list_applications(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ListApplications", input, options) end @doc """ Retrieves the list of key-value tags assigned to the application. For more information, see [Using Tagging](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). """ def list_tags_for_resource(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ListTagsForResource", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Starts the specified Amazon Kinesis Analytics application. After creating an application, you must exclusively call this operation to start your application. After the application starts, it begins consuming the input data, processes it, and writes the output to the configured destination. The application status must be `READY` for you to start an application. You can get the application status in the console or using the [DescribeApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) operation. After you start the application, you can stop the application from processing the input by calling the [StopApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_StopApplication.html) operation. This operation requires permissions to perform the `kinesisanalytics:StartApplication` action. """ def start_application(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "StartApplication", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Stops the application from processing input data. You can stop an application only if it is in the running state. You can use the [DescribeApplication](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/API_DescribeApplication.html) operation to find the application state. After the application is stopped, Amazon Kinesis Analytics stops reading data from the input, the application stops processing data, and there is no output written to the destination. This operation requires permissions to perform the `kinesisanalytics:StopApplication` action. """ def stop_application(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "StopApplication", input, options) end @doc """ Adds one or more key-value tags to a Kinesis Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see [Using Tagging](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). """ def tag_resource(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "TagResource", input, options) end @doc """ Removes one or more tags from a Kinesis Analytics application. For more information, see [Using Tagging](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-tagging.html). """ def untag_resource(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "UntagResource", input, options) end @doc """ This documentation is for version 1 of the Amazon Kinesis Data Analytics API, which only supports SQL applications. Version 2 of the API supports SQL and Java applications. For more information about version 2, see [Amazon Kinesis Data Analytics API V2 Documentation](/kinesisanalytics/latest/apiv2/Welcome.html). Updates an existing Amazon Kinesis Analytics application. Using this API, you can update application code, input configuration, and output configuration. Note that Amazon Kinesis Analytics updates the `CurrentApplicationVersionId` each time you update your application. This operation requires permission for the `kinesisanalytics:UpdateApplication` action. """ def update_application(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "UpdateApplication", input, options) end end
lib/aws/generated/kinesis_analytics.ex
0.864439
0.650918
kinesis_analytics.ex
starcoder
defmodule AWS.DatabaseMigration do @moduledoc """ Database Migration Service Database Migration Service (DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL. For more information about DMS, see [What Is Database Migration Service?](https://docs.aws.amazon.com/dms/latest/userguide/Welcome.html) in the *Database Migration Service User Guide.* """ alias AWS.Client alias AWS.Request def metadata do %AWS.ServiceMetadata{ abbreviation: nil, api_version: "2016-01-01", content_type: "application/x-amz-json-1.1", credential_scope: nil, endpoint_prefix: "dms", global?: false, protocol: "json", service_id: "Database Migration Service", signature_version: "v4", signing_name: "dms", target_prefix: "AmazonDMSv20160101" } end @doc """ Adds metadata tags to an DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS. For more information, see [ `Tag` ](https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) data type description. """ def add_tags_to_resource(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "AddTagsToResource", input, options) end @doc """ Applies a pending maintenance action to a resource (for example, to a replication instance). """ def apply_pending_maintenance_action(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ApplyPendingMaintenanceAction", input, options) end @doc """ Cancels a single premigration assessment run. This operation prevents any individual assessments from running if they haven't started running. It also attempts to cancel any individual assessments that are currently running. """ def cancel_replication_task_assessment_run(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CancelReplicationTaskAssessmentRun", input, options) end @doc """ Creates an endpoint using the provided settings. For a MySQL source or target endpoint, don't explicitly specify the database using the `DatabaseName` request parameter on the `CreateEndpoint` API call. Specifying `DatabaseName` when you create a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task. """ def create_endpoint(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CreateEndpoint", input, options) end @doc """ Creates an DMS event notification subscription. You can specify the type of source (`SourceType`) you want to be notified of, provide a list of DMS source IDs (`SourceIds`) that triggers the events, and provide a list of event categories (`EventCategories`) for events you want to be notified of. If you specify both the `SourceType` and `SourceIds`, such as `SourceType = replication-instance` and `SourceIdentifier = my-replinstance`, you will be notified of all the replication instance events for the specified source. If you specify a `SourceType` but don't specify a `SourceIdentifier`, you receive notice of the events for that source type for all your DMS sources. If you don't specify either `SourceType` nor `SourceIdentifier`, you will be notified of events generated from all DMS sources belonging to your customer account. For more information about DMS events, see [Working with Events and Notifications](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) in the *Database Migration Service User Guide.* """ def create_event_subscription(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CreateEventSubscription", input, options) end @doc """ Creates the replication instance using the specified parameters. DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see [Creating the IAM Roles to Use With the CLI and DMS API](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.APIRole). For information on the required permissions, see [IAM Permissions Needed to Use DMS](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.IAMPermissions). """ def create_replication_instance(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CreateReplicationInstance", input, options) end @doc """ Creates a replication subnet group given a list of the subnet IDs in a VPC. The VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the service will throw a `ReplicationSubnetGroupDoesNotCoverEnoughAZs` exception. """ def create_replication_subnet_group(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CreateReplicationSubnetGroup", input, options) end @doc """ Creates a replication task using the specified parameters. """ def create_replication_task(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "CreateReplicationTask", input, options) end @doc """ Deletes the specified certificate. """ def delete_certificate(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteCertificate", input, options) end @doc """ Deletes the connection between a replication instance and an endpoint. """ def delete_connection(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteConnection", input, options) end @doc """ Deletes the specified endpoint. All tasks associated with the endpoint must be deleted before you can delete the endpoint. """ def delete_endpoint(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteEndpoint", input, options) end @doc """ Deletes an DMS event subscription. """ def delete_event_subscription(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteEventSubscription", input, options) end @doc """ Deletes the specified replication instance. You must delete any migration tasks that are associated with the replication instance before you can delete it. """ def delete_replication_instance(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteReplicationInstance", input, options) end @doc """ Deletes a subnet group. """ def delete_replication_subnet_group(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteReplicationSubnetGroup", input, options) end @doc """ Deletes the specified replication task. """ def delete_replication_task(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteReplicationTask", input, options) end @doc """ Deletes the record of a single premigration assessment run. This operation removes all metadata that DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket. """ def delete_replication_task_assessment_run(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DeleteReplicationTaskAssessmentRun", input, options) end @doc """ Lists all of the DMS attributes for a customer account. These attributes include DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota, includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region. This command does not take any parameters. """ def describe_account_attributes(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeAccountAttributes", input, options) end @doc """ Provides a list of individual assessments that you can specify for a new premigration assessment run, given one or more parameters. If you specify an existing migration task, this operation provides the default individual assessments you can specify for that task. Otherwise, the specified parameters model elements of a possible migration task on which to base a premigration assessment run. To use these migration task modeling parameters, you must specify an existing replication instance, a source database engine, a target database engine, and a migration type. This combination of parameters potentially limits the default individual assessments available for an assessment run created for a corresponding migration task. If you specify no parameters, this operation provides a list of all possible individual assessments that you can specify for an assessment run. If you specify any one of the task modeling parameters, you must specify all of them or the operation cannot provide a list of individual assessments. The only parameter that you can specify alone is for an existing migration task. The specified task definition then determines the default list of individual assessments that you can specify in an assessment run for the task. """ def describe_applicable_individual_assessments(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DescribeApplicableIndividualAssessments", input, options ) end @doc """ Provides a description of the certificate. """ def describe_certificates(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeCertificates", input, options) end @doc """ Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint. """ def describe_connections(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeConnections", input, options) end @doc """ Returns information about the possible endpoint settings available when you create an endpoint for a specific database engine. """ def describe_endpoint_settings(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeEndpointSettings", input, options) end @doc """ Returns information about the type of endpoints available. """ def describe_endpoint_types(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeEndpointTypes", input, options) end @doc """ Returns information about the endpoints for your account in the current region. """ def describe_endpoints(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeEndpoints", input, options) end @doc """ Lists categories for all event source types, or, if specified, for a specified source type. You can see a list of the event categories and source types in [Working with Events and Notifications](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) in the *Database Migration Service User Guide.* """ def describe_event_categories(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeEventCategories", input, options) end @doc """ Lists all the event subscriptions for a customer account. The description of a subscription includes `SubscriptionName`, `SNSTopicARN`, `CustomerID`, `SourceType`, `SourceID`, `CreationTime`, and `Status`. If you specify `SubscriptionName`, this action lists the description for that subscription. """ def describe_event_subscriptions(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeEventSubscriptions", input, options) end @doc """ Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on DMS events, see [Working with Events and Notifications](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html) in the *Database Migration Service User Guide.* """ def describe_events(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeEvents", input, options) end @doc """ Returns information about the replication instance types that can be created in the specified region. """ def describe_orderable_replication_instances(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DescribeOrderableReplicationInstances", input, options ) end @doc """ For internal use only """ def describe_pending_maintenance_actions(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribePendingMaintenanceActions", input, options) end @doc """ Returns the status of the RefreshSchemas operation. """ def describe_refresh_schemas_status(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeRefreshSchemasStatus", input, options) end @doc """ Returns information about the task logs for the specified task. """ def describe_replication_instance_task_logs(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DescribeReplicationInstanceTaskLogs", input, options ) end @doc """ Returns information about replication instances for your account in the current region. """ def describe_replication_instances(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeReplicationInstances", input, options) end @doc """ Returns information about the replication subnet groups. """ def describe_replication_subnet_groups(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeReplicationSubnetGroups", input, options) end @doc """ Returns the task assessment results from the Amazon S3 bucket that DMS creates in your Amazon Web Services account. This action always returns the latest results. For more information about DMS task assessments, see [Creating a task assessment report](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.AssessmentReport.html) in the *Database Migration Service User Guide*. """ def describe_replication_task_assessment_results(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DescribeReplicationTaskAssessmentResults", input, options ) end @doc """ Returns a paginated list of premigration assessment runs based on filter settings. These filter settings can specify a combination of premigration assessment runs, migration tasks, replication instances, and assessment run status values. This operation doesn't return information about individual assessments. For this information, see the `DescribeReplicationTaskIndividualAssessments` operation. """ def describe_replication_task_assessment_runs(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DescribeReplicationTaskAssessmentRuns", input, options ) end @doc """ Returns a paginated list of individual assessments based on filter settings. These filter settings can specify a combination of premigration assessment runs, migration tasks, and assessment status values. """ def describe_replication_task_individual_assessments(%Client{} = client, input, options \\ []) do Request.request_post( client, metadata(), "DescribeReplicationTaskIndividualAssessments", input, options ) end @doc """ Returns information about replication tasks for your account in the current region. """ def describe_replication_tasks(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeReplicationTasks", input, options) end @doc """ Returns information about the schema for the specified endpoint. """ def describe_schemas(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeSchemas", input, options) end @doc """ Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted. Note that the "last updated" column the DMS console only indicates the time that DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table. """ def describe_table_statistics(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "DescribeTableStatistics", input, options) end @doc """ Uploads the specified certificate. """ def import_certificate(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ImportCertificate", input, options) end @doc """ Lists all metadata tags attached to an DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see [ `Tag` ](https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) data type description. """ def list_tags_for_resource(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ListTagsForResource", input, options) end @doc """ Modifies the specified endpoint. For a MySQL source or target endpoint, don't explicitly specify the database using the `DatabaseName` request parameter on the `ModifyEndpoint` API call. Specifying `DatabaseName` when you modify a MySQL endpoint replicates all the task tables to this single database. For MySQL endpoints, you specify the database only when you specify the schema in the table-mapping rules of the DMS task. """ def modify_endpoint(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ModifyEndpoint", input, options) end @doc """ Modifies an existing DMS event notification subscription. """ def modify_event_subscription(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ModifyEventSubscription", input, options) end @doc """ Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request. Some settings are applied during the maintenance window. """ def modify_replication_instance(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ModifyReplicationInstance", input, options) end @doc """ Modifies the settings for the specified replication subnet group. """ def modify_replication_subnet_group(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ModifyReplicationSubnetGroup", input, options) end @doc """ Modifies the specified replication task. You can't modify the task endpoints. The task must be stopped before you can modify it. For more information about DMS tasks, see [Working with Migration Tasks](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) in the *Database Migration Service User Guide*. """ def modify_replication_task(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ModifyReplicationTask", input, options) end @doc """ Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later DMS version as the current replication instance. """ def move_replication_task(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "MoveReplicationTask", input, options) end @doc """ Reboots a replication instance. Rebooting results in a momentary outage, until the replication instance becomes available again. """ def reboot_replication_instance(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "RebootReplicationInstance", input, options) end @doc """ Populates the schema for the specified endpoint. This is an asynchronous operation and can take several minutes. You can check the status of this operation by calling the DescribeRefreshSchemasStatus operation. """ def refresh_schemas(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "RefreshSchemas", input, options) end @doc """ Reloads the target database table with the source data. You can only use this operation with a task in the `RUNNING` state, otherwise the service will throw an `InvalidResourceStateFault` exception. """ def reload_tables(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "ReloadTables", input, options) end @doc """ Removes metadata tags from an DMS resource, including replication instance, endpoint, security group, and migration task. For more information, see [ `Tag` ](https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) data type description. """ def remove_tags_from_resource(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "RemoveTagsFromResource", input, options) end @doc """ Starts the replication task. For more information about DMS tasks, see [Working with Migration Tasks ](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.html) in the *Database Migration Service User Guide.* """ def start_replication_task(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "StartReplicationTask", input, options) end @doc """ Starts the replication task assessment for unsupported data types in the source database. You can only use this operation for a task if the following conditions are true: * The task must be in the `stopped` state. * The task must have successful connections to the source and target. If either of these conditions are not met, an `InvalidResourceStateFault` error will result. For information about DMS task assessments, see [Creating a task assessment report](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.AssessmentReport.html) in the *Database Migration Service User Guide*. """ def start_replication_task_assessment(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "StartReplicationTaskAssessment", input, options) end @doc """ Starts a new premigration assessment run for one or more individual assessments of a migration task. The assessments that you can specify depend on the source and target database engine and the migration type defined for the given task. To run this operation, your migration task must already be created. After you run this operation, you can review the status of each individual assessment. You can also run the migration task manually after the assessment run and its individual assessments complete. """ def start_replication_task_assessment_run(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "StartReplicationTaskAssessmentRun", input, options) end @doc """ Stops the replication task. """ def stop_replication_task(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "StopReplicationTask", input, options) end @doc """ Tests the connection between the replication instance and the endpoint. """ def test_connection(%Client{} = client, input, options \\ []) do Request.request_post(client, metadata(), "TestConnection", input, options) end end
lib/aws/generated/database_migration.ex
0.874037
0.432303
database_migration.ex
starcoder
defmodule WebDriver.Mouse do @moduledoc """ Mouse driver event. """ @doc """ Move the mouse to the specified element. Parameters: element: The element to move the mouse to. offsetx: X offset to the element coordinates offsety: Y offset to the element coordinates https://code.google.com/p/selenium/wiki/JsonWireProtocol#/session/:sessionId/moveto """ def move_to element, offsetx \\ 0, offsety \\ 0 do id = URI.decode element.id cmd element.session, :move_to, %{element: id, xoffset: offsetx, yoffset: offsety} end @doc """ Click a mouse button. Parameters: session : The session server process to send the click to. button: The button to click, one of :left, :middle or :right https://code.google.com/p/selenium/wiki/JsonWireProtocol#/session/:sessionId/click """ def click session, button \\ :left do cmd session, :mouse_click, %{button: button_number(button)} end @doc """ Send a Button Down event. Parameters: session : The session server process to send the event to. button: The button to press, one of :left, :middle or :right You will get an error if you fire a button down event on a button that is already down (on some browsers). https://code.google.com/p/selenium/wiki/JsonWireProtocol#/session/:sessionId/buttondown """ def button_down session, button \\ :left do cmd session, :mouse_button_down, %{button: button_number(button)} end @doc """ Send a Button Up event. Parameters: session : The session server process to send the event to. button: The button to raise, one of :left, :middle or :right You will get an error if you fire a button up event on a mouse button that has not recieved a button down event previously. https://code.google.com/p/selenium/wiki/JsonWireProtocol#/session/:sessionId/buttonup """ def button_up session, button \\ :left do cmd session, :mouse_button_up, %{button: button_number(button)} end @doc """ Send a double click mouse event. Parameters: session : The session server process to send the event to. button: The button to double click, one of :left, :middle or :right https://code.google.com/p/selenium/wiki/JsonWireProtocol#/session/:sessionId/doubleclick """ def double_click session, button \\ :left do cmd session, :mouse_double_click, %{button: button_number(button)} end # Send a command to the server defp cmd session, command, params do :gen_server.call session, {command, params}, 20000 end defp button_number :left do 0 end defp button_number :middle do 1 end defp button_number :right do 2 end end
lib/webdriver/mouse.ex
0.753194
0.410756
mouse.ex
starcoder
defmodule OK do @moduledoc """ The `OK` module enables clean and expressive error handling when coding with idiomatic `:ok`/`:error` tuples. We've included many examples in the function docs here, but you can also check out the [README](https://github.com/CrowdHailer/OK/blob/master/README.md) for more details and usage. Feel free to [open an issue](https://github.com/CrowdHailer/OK/issues) for any questions that you have. """ @doc """ Applies a function to the interior value of a result tuple. If the tuple is tagged `:ok` the value will be mapped by the function. A tuple tagged `:error` will be unchanged. ## Examples iex> OK.map({:ok, 2}, fn (x) -> 2 * x end) {:ok, 4} iex> OK.map({:error, :some_reason}, fn (x) -> 2 * x end) {:error, :some_reason} """ @spec map({:ok, a}, (a -> b)) :: {:ok, b} when a: any, b: any @spec map({:error, reason}, (any -> any)) :: {:error, reason} when reason: any def map({:ok, value}, func) when is_function(func, 1), do: {:ok, func.(value)} def map({:error, reason}, _func), do: {:error, reason} @doc """ Takes a result tuple and a next function. If the result tuple is tagged as a success then its value will be passed to the next function. If the tag is failure then the next function is skipped. ## Examples iex> OK.flat_map({:ok, 2}, fn (x) -> {:ok, 2 * x} end) {:ok, 4} iex> OK.flat_map({:error, :some_reason}, fn (x) -> {:ok, 2 * x} end) {:error, :some_reason} """ @spec flat_map({:ok, a} | {:error, reason}, (a -> {:ok, b} | {:error, reason})) :: {:ok, b} | {:error, reason} when a: any, b: any, reason: any # NOTE return value of function is not checked to be a result tuple. # errors are informative enough when piped to something else expecting result tuple. # Also dialyzer will catch in anonymous function with incorrect typespec is given. def flat_map({:ok, value}, func) when is_function(func, 1), do: func.(value) def flat_map({:error, reason}, _func), do: {:error, reason} @doc """ Applies a function to the interior error of a result tuple. If the tuple is tagged `:error`, the value will be mapped by the function. A tuple tagged `:ok` will be unchanged. ## Examples ```elixir iex> OK.map_err({:error, :message}, fn (e) -> e |> to_string |> String.capitalize end) {:error, "Message"} iex> OK.map_err({:ok, nil}, fn (e) -> e |> to_string end) {:ok, nil} ``` """ @spec map_err({:ok, value}, (any -> any)) :: {:ok, value} when value: any @spec map_err({:error, a}, (a -> b)) :: {:error, b} when a: any, b: any def map_err({:ok, value}, _func), do: {:ok, value} def map_err({:error, err}, func) when is_function(func, 1), do: {:error, func.(err)} @doc """ Takes a result tuple and a next function. If the result tuple is tagged as a success then the next function is skipped. If the tag is failure then its value will be passed to the next function. ## Examples ```elixir iex> OK.flat_map_err({:error, :message}, fn (_) -> {:ok, :replacement} end) {:ok, :replacement} iex> OK.flat_map_err({:ok, :value}, fn (_) -> {:ok, :replacement} end) {:ok, :value} ``` """ @spec flat_map_err({:ok, value} | {:error, a}, (a -> {:ok, value} | {:error, b})) :: {:ok, value} | {:error, b} when a: any, b: any, value: any def flat_map_err({:ok, value}, _func), do: {:ok, value} def flat_map_err({:error, err}, func) when is_function(func, 1), do: func.(err) @doc """ Provides a replacement success value for an error. If the result tuple is already `:ok`, this returns it unchanged. If the result tuple is `:error`, this uses the alternate `value` to produce `{:ok, value}`. ## Examples ```elixir iex> OK.ok_or({:ok, :good}, :fallback) {:ok, :good} iex> OK.ok_or({:error, nil}, :fallback) {:ok, :fallback} ``` """ @spec ok_or({:ok, value}, value) :: {:ok, value} when value: any def ok_or({:ok, value}, _value), do: {:ok, value} def ok_or({:error, _}, value), do: {:ok, value} @doc """ Computes a replacement success value for an error. If the result tuple is already `:ok`, this returns it unchanged. If the result tuple is `:error`, this uses the alternate `func`tion to compute `{:ok, func.()}`. Prefer this function to `ok_or` when the replacement value is expensive to compute, or alters state, and so should only be computed lazily, rather than eagerly. Elixir computes function arguments before entering the call. ## Examples ```elixir iex> OK.ok_or_else({:ok, :good}, fn (_) -> :fallback end) {:ok, :good} iex> OK.ok_or_else({:error, nil}, fn (_) -> :fallback end) {:ok, :fallback} ``` """ @spec ok_or_else({:ok, value} | {:error, any}, (() -> value)) :: {:ok, value} when value: any def ok_or_else({:ok, value}, _func), do: {:ok, value} def ok_or_else({:error, err}, func) when is_function(func, 1), do: {:ok, func.(err)} @doc """ Transform every element of a list with a mapping function. The mapping function must return a result tuple. If all of the result tuples are tagged :ok, then it returns a list tagged with :ok. If one or more of the result tuples are tagged :error, it returns the first error. ## Examples iex> OK.map_all(1..3, &safe_div(6, &1)) {:ok, [6.0, 3.0, 2.0]} iex> OK.map_all([-1, 0, 1], &safe_div(6, &1)) {:error, :zero_division} """ @spec map_all([a], (a -> {:ok, b} | {:error, reason})) :: {:ok, [b]} | {:error, reason} when a: any, b: any, reason: any def map_all(list, func) when is_function(func, 1) do result = Enum.reduce_while(list, [], fn value, acc -> case func.(value) do {:ok, value} -> {:cont, [value | acc]} {:error, _} = error -> {:halt, error} end end) if is_list(result), do: {:ok, Enum.reverse(result)}, else: result end @doc """ Takes a result tuple, a predicate function, and an error reason. If the result tuple is tagged as a success then its value will be passed to the predicate function. If the predicate returns `true`, then the result tuple stay the same. If the predicate returns `false`, then the result tuple becomes `{:error, reason}`. If the tag is failure then the predicate function is skipped. ## Examples iex> OK.check({:ok, 2}, fn (x) -> x == 2 end, :bad_value) {:ok, 2} iex> OK.check({:ok, 2}, fn (x) -> x == 3 end, :bad_value) {:error, :bad_value} iex> OK.check({:error, :some_reason}, fn (x) -> x == 4 end, :bad_value) {:error, :some_reason} """ @spec check({:ok, a}, (a -> boolean), test_failure_reason) :: {:ok, a} | {:error, test_failure_reason} when a: any, test_failure_reason: any @spec check({:error, reason}, (a -> boolean), test_failure_reason) :: {:error, reason} when a: any, reason: any, test_failure_reason: any def check({:ok, value}, func, reason) when is_function(func, 1) do case func.(value) do true -> {:ok, value} false -> {:error, reason} end end def check({:error, reason}, _func, _reason), do: {:error, reason} @doc false @deprecated "use OK.success?/1 instead" @spec is_success?({:ok, a}) :: true when a: any @spec is_success?({:error, reason}) :: false when reason: any def is_success?(value), do: success?(value) @doc """ Checks if a result tuple is tagged as `:ok`, and returns `true` if so. If the tuple is tagged as `:error`, returns `false`. ## Examples iex> OK.success?({:ok, "some value"}) true iex> OK.success?({:error, :some_reason}) false """ @spec success?({:ok, a}) :: true when a: any @spec success?({:error, reason}) :: false when reason: any def success?({:ok, _value}), do: true def success?({:error, _reason}), do: false @doc false @deprecated "use OK.failure?/1 instead" @spec is_failure?({:ok, a}) :: false when a: any @spec is_failure?({:error, reason}) :: true when reason: any def is_failure?(value), do: failure?(value) @doc """ Checks if a result tuple is tagged as `:error`, and returns `true` if so. If the tuple is tagged as `:ok`, returns `false`. ## Examples iex> OK.failure?({:error, :some_reason}) true iex> OK.failure?({:ok, "some value"}) false """ @spec failure?({:ok, a}) :: false when a: any @spec failure?({:error, reason}) :: true when reason: any def failure?({:ok, _value}), do: false def failure?({:error, _reason}), do: true @doc guard: true @doc """ Checks if a result tuple is tagged as `:error`, and returns `true` if so. If the tuple is tagged as `:ok`, returns `false`. Allowed in guards. ## Examples iex> require OK ...> f = fn result when OK.is_success(result) -> "ok" end ...> f.({:ok, "some value"}) "ok" iex> require OK ...> f = fn result when OK.is_success(result) -> "ok" end ...> f.({:error, :some_reason}) ** (FunctionClauseError) no function clause matching in anonymous fn/1 in OKTest.\"doctest OK.is_success/1 (54)\"/1 iex> require OK ...> f = fn result when OK.is_success(result) -> "ok" end ...> f.(nil) ** (FunctionClauseError) no function clause matching in anonymous fn/1 in OKTest.\"doctest OK.is_success/1 (55)\"/1 """ @spec is_success(term()) :: Macro.t() defguard is_success(result) when is_tuple(result) and tuple_size(result) === 2 and elem(result, 0) === :ok @doc guard: true @doc """ Checks if a result tuple is tagged as `:error`, and returns `true` if so. If the tuple is tagged as `:ok`, returns `false`. Allowed in guards. ## Examples iex> require OK ...> f = fn result when OK.is_failure(result) -> "error" end ...> f.({:error, :some_reason}) "error" iex> require OK ...> f = fn result when OK.is_failure(result) -> "error" end ...> f.({:ok, "some value"}) ** (FunctionClauseError) no function clause matching in anonymous fn/1 in OKTest."doctest OK.is_failure/1 (51)"/1 iex> require OK ...> f = fn result when OK.is_failure(result) -> "error" end ...> f.(nil) ** (FunctionClauseError) no function clause matching in anonymous fn/1 in OKTest.\"doctest OK.is_failure/1 (52)\"/1 """ @spec is_failure(term()) :: Macro.t() defguard is_failure(result) when is_tuple(result) and tuple_size(result) === 2 and elem(result, 0) === :error @doc """ Wraps a value as a successful result tuple. ## Examples iex> OK.success(:value) {:ok, :value} """ defmacro success(value) do quote do {:ok, unquote(value)} end end @doc """ Creates a failed result tuple with the given reason. ## Examples iex> OK.failure("reason") {:error, "reason"} """ defmacro failure(reason) do quote do {:error, unquote(reason)} end end @doc """ Wraps any term in an `:ok` tuple, unless already a result monad. ## Examples iex> OK.wrap("value") {:ok, "value"} iex> OK.wrap({:ok, "value"}) {:ok, "value"} iex> OK.wrap({:error, "reason"}) {:error, "reason"} """ def wrap({:ok, value}), do: {:ok, value} def wrap({:error, reason}), do: {:error, reason} def wrap(other), do: {:ok, other} @doc """ Require a variable not to be nil. Optionally provide a reason why variable is required. ## Examples iex> OK.required(:some) {:ok, :some} iex> OK.required(nil) {:error, :value_required} iex> OK.required(Map.get(%{}, :port), :port_number_required) {:error, :port_number_required} """ @spec required(any, any) :: {:ok, any} | {:error, any} def required(value, reason \\ :value_required) def required(nil, reason), do: {:error, reason} def required(value, _reason), do: {:ok, value} @doc """ Pipeline version of `map/2`. ## Examples iex> {:ok, 5} ~> Integer.to_string {:ok, "5"} iex> {:error, :zero_division_error} ~> Integer.to_string {:error, :zero_division_error} iex> {:ok, "a,b"} ~> String.split(",") {:ok, ["a", "b"]} """ defmacro lhs ~> {call, line, args} do value = quote do: value args = [value | args || []] quote do OK.map(unquote(lhs), fn unquote(value) -> unquote({call, line, args}) end) end end @doc """ The OK result pipe operator `~>>`, or result monad flat_map operator, is similar to Elixir's native `|>` except it is used within happy path. It takes the value out of an `{:ok, value}` tuple and passes it as the first argument to the function call on the right. It can be used in several ways. Pipe to a local call.<br /> _(This is equivalent to calling `double(5)`)_ iex> {:ok, 5} ~>> double() {:ok, 10} Pipe to a remote call.<br /> _(This is equivalent to calling `OKTest.double(5)`)_ iex> {:ok, 5} ~>> OKTest.double() {:ok, 10} iex> {:ok, 5} ~>> __MODULE__.double() {:ok, 10} Pipe with extra arguments.<br /> _(This is equivalent to calling `safe_div(6, 2)`)_ iex> {:ok, 6} ~>> safe_div(2) {:ok, 3.0} iex> {:ok, 6} ~>> safe_div(0) {:error, :zero_division} It also works with anonymous functions. iex> {:ok, 3} ~>> (fn (x) -> {:ok, x + 1} end).() {:ok, 4} iex> {:ok, 6} ~>> decrement().(2) {:ok, 4} When an error is returned anywhere in the pipeline, it will be returned. iex> {:ok, 6} ~>> safe_div(0) ~>> double() {:error, :zero_division} iex> {:error, :previous_bad} ~>> safe_div(0) ~>> double() {:error, :previous_bad} """ defmacro lhs ~>> {call, line, args} do value = quote do: value args = [value | args || []] quote do OK.flat_map(unquote(lhs), fn unquote(value) -> unquote({call, line, args}) end) end end @doc """ Pipeline version of `map_err/2`. ## Examples ```elixir iex> {:ok, 5} <~ Integer.to_string {:ok, 5} iex> {:error, :message} <~ to_string <~ String.capitalize {:error, "Message"} ``` """ defmacro lhs <~ {call, line, args} do value = quote do: value args = [value | args || []] quote do OK.map_err(unquote(lhs), fn unquote(value) -> unquote({call, line, args}) end) end end @doc """ The OK result pipe operator `<<~`, or result monad `flat_map_err` operator, is similar to Elixir’s native `|>` except it is used within the error path. It takes the error out of an `{:error, err_value}` tuple and passes it as the first argumnet to the function call on the right. The right call is expected to return a result tuple, of either `:ok` or `:error`. It can be used in several ways. Pipe to a local call. *(This is equivalent to calling `double(5)`) ```elixir iex> {:error, 5} <<~ double() {:ok, 10} ``` Pipe to a remote call. *(This is equivalent to calling `OKTest.double(5)`)* ```elixir iex> {:error, 5} <<~ OKTest.double() {:ok, 10} iex> {:error, 5} <<~ __MODULE__.double() {:ok, 10} ``` Pipe with extra arguments. *(This is equivalent to calling `safe_div(6, 2)`)* ```elixir iex> {:error, 6} <<~ safe_div(2) {:ok, 3.0} iex> {:error, 6} <<~ safe_div(0) {:error, :zero_division} ``` It also works with anonymous functions. ```elixir iex> {:error, 3} <<~ (fn (x) -> {:ok, x + 1} end).() {:ok, 4} iex> {:error, 6} <<~ decrement().(2) {:ok, 4} ``` When a success is returned anywhere in the pipeline, it will be returned without invoking subsequent error modifiers. This is because `<<~` and `flat_map_err` are only useful to handle failures, and are not needed on successes. Use `~>>` and `flat_map` for ongoing computation, and the error paths only for immediate error recovery and continuation. ```elixir iex> {:error, 6} <<~ double() <<~ safe_div(2) {:ok, 12} iex> {:ok, 6} <<~ safe_div(0) <<~ double() {:ok, 6} ``` """ defmacro lhs <<~ {call, line, args} do value = quote do: value args = [value | args || []] quote do OK.flat_map_err(unquote(lhs), fn unquote(value) -> unquote({call, line, args}) end) end end @doc """ Pipeline version of `ok_or`. This operator takes a result tuple and, if it is success, returns it unmodified. If the tuple is an error, then it wraps the right-hand value in `{:ok, v}` and produces it. This is useful for providing an immediate fallback value to a fallible computation without breaking the rest of the data flow. ## Examples ```elixir iex> {:ok, 5} <|> 6 {:ok, 5} iex> {:error, nil} <|> 6 {:ok, 6} ``` """ defmacro lhs <|> value do quote do OK.ok_or(unquote(lhs), unquote(value)) end end @doc """ Pipeline version of `ok_or_else`. This operator takes a result tuple and, if it is success, returns it unmodified. If the tuple is an error, then it passes the error value into the right-hand function, executes it, and returns the result wrapped in `{:ok, rhs()}`. ## Examples ```elixir iex> {:ok, 5} <~> (fn (err) -> err |> to_string |> String.length end).() {:ok, 5} iex> {:error, :unknown} <~> (fn (err) -> err |> to_string |> String.length end).() {:ok, 7} """ defmacro lhs <~> {call, line, args} do value = quote do: value args = [value | args || []] quote do OK.ok_or_else(unquote(lhs), fn unquote(value) -> unquote({call, line, args}) end) end end @doc """ Lightweight notation for working with the values from serval failible components. Values are extracted from an ok tuple using the in (`<-`) operator. Any line using this operator that trys to match on an error tuple will result in early return. If all bindings can be made, i.e. all functions returned `{:ok, value}`, then the after block is executed to return the final value. Return values from the after block are wrapped as an ok result, unless they are already a result tuple. The return value of a for comprehension is always a result monad iex> OK.for do ...> a <- safe_div(8, 2) ...> b <- safe_div(a, 2) ...> after ...> a + b ...> end {:ok, 6.0} iex> OK.for do ...> a <- safe_div(8, 2) ...> b <- safe_div(a, 2) ...> after ...> OK.success(a + b) ...> end {:ok, 6.0} iex> OK.for do ...> a <- safe_div(8, 2) ...> _ <- safe_div(a, 2) ...> after ...> {:error, :something_else} ...> end {:error, :something_else} Regular matching using the `=` operator is also available, for calculating intermediate values. iex> OK.for do ...> a <- safe_div(8, 2) ...> b = 2.0 ...> after ...> a + b ...> end {:ok, 6.0} iex> OK.for do ...> a <- safe_div(8, 2) ...> b <- safe_div(a, 0) # error here ...> after ...> a + b # does not execute this line ...> end {:error, :zero_division} iex> OK.for do: :literal, after: :result {:ok, :result} """ defmacro for(do: binding, after: yield_block) do {:__block__, _env, bindings} = wrap_code_block(binding) safe_yield_block = quote do unquote(__MODULE__).wrap(unquote(yield_block)) end expand_bindings(bindings, safe_yield_block) end defmacro for(_) do description = """ OK.for/1 requires `do` and `after` clauses. e.g. OK.for do a <- safe_div(8, 2) b <- safe_div(a, 2) after a + b end """ raise %SyntaxError{ file: __ENV__.file, line: __ENV__.line, description: description } end @doc """ Handle return value from several failible functions. Values are extracted from an ok tuple using the in (`<-`) operator. Any line using this operator that trys to match on an error tuple will result in early return. If all bindings can be made, i.e. all functions returned `{:ok, value}`, then the after block is executed to return the final value. If any binding fails then the rescue block will be tried. *Note: return value from after will be returned unwrapped* ## Examples iex> OK.try do ...> a <- safe_div(8, 2) ...> b <- safe_div(a, 2) ...> after ...> a + b ...> rescue ...> :zero_division -> ...> :nan ...> end 6.0 iex> OK.try do ...> a <- safe_div(8, 2) ...> b <- safe_div(a, 0) ...> after ...> a + b ...> rescue ...> :zero_division -> ...> :nan ...> end :nan """ defmacro try(do: bind_block, after: yield_block, rescue: exception_clauses) do {:__block__, _env, bindings} = wrap_code_block(bind_block) quote do case unquote(expand_bindings(bindings, yield_block)) do {:error, reason} -> case reason do unquote(exception_clauses) end value -> value end end end defmacro try(_) do description = """ OK.try/1 requires `do`, `after` and `rescue` clauses. e.g. OK.try do a <- safe_div(8, 2) b <- safe_div(a, 0) after a + b rescue :zero_division -> :nan end """ raise %SyntaxError{ file: __ENV__.file, line: __ENV__.line, description: description } end defp wrap_code_block(block = {:__block__, _env, _lines}), do: block defp wrap_code_block(expression = {_, env, _}) do {:__block__, env, [expression]} end defp wrap_code_block(literal) do {:__block__, [], [literal]} end defp expand_bindings([{:<-, env, [left, right]} | rest], yield_block) do line = Keyword.get(env, :line) normal_cases = quote line: line do {:ok, unquote(left)} -> unquote(expand_bindings(rest, yield_block)) {:error, reason} -> {:error, reason} end warning_case = quote line: line, generated: true do return -> raise %OK.BindError{ return: return, lhs: unquote(Macro.to_string(left)), rhs: unquote(Macro.to_string(right)) } end quote line: line do case unquote(right) do unquote(normal_cases ++ warning_case) end end end defp expand_bindings([normal | rest], yield_block) do quote location: :keep do unquote(normal) unquote(expand_bindings(rest, yield_block)) end end defp expand_bindings([], yield_block) do yield_block end end
lib/ok.ex
0.907114
0.907926
ok.ex
starcoder
defmodule Ecto.LogEntry do @moduledoc """ Struct used for logging entries. It is composed of the following fields: * query - the query as string or a function that when invoked resolves to string; * source - the query data source; * params - the query parameters; * result - the query result as an `:ok` or `:error` tuple; * query_time - the time spent executing the query in native units; * decode_time - the time spent decoding the result in native units (it may be nil); * queue_time - the time spent to check the connection out in native units (it may be nil); * connection_pid - the connection process that executed the query; * ansi_color - the color that should be used when logging the entry. Notice all times are stored in native unit. You must convert them to the proper unit by using `System.convert_time_unit/3` before logging. """ alias Ecto.LogEntry @type t :: %LogEntry{query: String.t | (t -> String.t), source: String.t | Enum.t | nil, params: [term], query_time: integer, decode_time: integer | nil, queue_time: integer | nil, connection_pid: pid | nil, result: {:ok, term} | {:error, Exception.t}, ansi_color: IO.ANSI.ansicode | nil} defstruct query: nil, source: nil, params: [], query_time: nil, decode_time: nil, queue_time: nil, result: nil, connection_pid: nil, ansi_color: nil require Logger @doc """ Logs the given entry in debug mode. The logger call will be removed at compile time if `compile_time_purge_level` is set to higher than debug. """ def log(%{connection_pid: connection_pid, ansi_color: ansi_color} = entry) do Logger.debug(fn -> {_entry, iodata} = Ecto.LogEntry.to_iodata(entry) iodata end, ecto_conn_pid: connection_pid, ansi_color: ansi_color) entry end @doc """ Logs the given entry in the given level. The logger call won't be removed at compile time as custom level is given. """ def log(entry, level) do Logger.log(level, fn -> {_entry, iodata} = Ecto.LogEntry.to_iodata(entry) iodata end, ecto_conn_pid: entry.connection_pid) entry end @doc """ Converts a log entry into iodata. The entry is automatically resolved if it hasn't been yet. """ def to_iodata(entry) do %{query_time: query_time, decode_time: decode_time, queue_time: queue_time, params: params, query: query, result: result, source: source} = entry params = Enum.map params, fn %Ecto.Query.Tagged{value: value} -> value value -> value end {entry, ["QUERY", ?\s, ok_error(result), ok_source(source), time("db", query_time, true), time("decode", decode_time, false), time("queue", queue_time, false), ?\n, query, ?\s, inspect(params, charlists: false)]} end ## Helpers defp ok_error({:ok, _}), do: "OK" defp ok_error({:error, _}), do: "ERROR" defp ok_source(nil), do: "" defp ok_source(source), do: " source=#{inspect(source)}" defp time(_label, nil, _force), do: [] defp time(label, time, force) do us = System.convert_time_unit(time, :native, :micro_seconds) ms = div(us, 100) / 10 if force or ms > 0 do [?\s, label, ?=, :io_lib_format.fwrite_g(ms), ?m, ?s] else [] end end end
deps/ecto/lib/ecto/log_entry.ex
0.83346
0.602091
log_entry.ex
starcoder
defmodule Alchemy.Events do @moduledoc """ This module provides raw Event hooks into the various events supplied by the gateway. To use the macros in this module, it must be `used`. This also defines a `__using__` macro for that module, which will then allow those hooks to be loaded in the main application via `use`. ### Example Usage ```elixir defmodule Example do use Alchemy.Events Events.on_message(:inspect) def inspect(message) do IO.inspect message.content end end defmodule Application do use Application alias Alchemy.Client def start(_type, _args) do run = Client.start(@token) use Example run end end ``` """ require Logger alias Alchemy.Cogs.EventHandler require Alchemy.EventMacros import Alchemy.EventMacros @doc """ Unloads all the hooks in a module from the handler. If you just want to disable a single function from triggering, see `Events.disable/1`. ## Examples ```elixir Client.start(@token) use MyEvents ``` If we want to remove this hooks at any point, we can simply do ```elixir Events.unload(MyEvents) ``` And, to set hook the module back up, all we need to do is: ```elixir use MyEvents ``` """ @spec unload(atom) :: :ok def unload(module) do EventHandler.unload(module) Logger.info "*#{inspect module}* removed from the event handler" end @doc """ Unhooks a function from the event handler. If you want to unhook all the functions in a module, see `Events.unload/1`. Because you can have multiple hooks with the same name, this function takes both the module and the function name. ## Examples ```elixir defmodule Annoying do use Alchemy.Events Events.on_message(:inspect) def inspect(message), do: IO.inspect message.content end ``` This function is annoying us, so we can easily disable it: ```elixir Events.disable(Annoying, :inspect) ``` If we want to turn it back on, we can of course do ```elixir use Annoying ``` """ @spec disable(atom, atom) :: :ok def disable(module, function) do EventHandler.disable(module, function) Logger.info "*#{module}.#{function}* unhooked from the event handler" end @doc """ Registers a handle triggering whenever a channel gets created. `args` : `Alchemy.Channel.t` As opposed to `on_DMChannel_create`, this gets triggered when a channel gets created in a guild, and not when a user starts a DM with this client. ## Examples ```elixir Events.on_channel_create(:foo) def foo(channel), do: IO.inspect channel.name ``` """ defmacro on_channel_create(func) do handle(:channel_create, func) end @doc """ Registers a handle triggering whenever a user starts a DM with the client. `args` : `Alchemy.Channel.dm_channel` As opposed to `on_channel_create`, this event gets triggered when a user starts a direct message with this client. ## Examples ```elixir Events.on_DMChannel_create(:foo) def foo(%DMChannel{recipients: [user|_]}) do IO.inspect user.name <> " just DMed me!" end ``` """ defmacro on_DMChannel_create(func) do handle(:dm_channel_create, func) end @doc """ Registers a handle triggering whenever a user closes a DM with the client. `args` : `Alchemy.Channel.dm_channel` """ defmacro on_DMChannel_delete(func) do handle(:dm_channel_delete, func) end @doc """ Registers a handle triggering whenever a guild channel gets removed. `args` : `Alchemy.Channel.t` """ defmacro on_channel_delete(func) do handle(:channel_delete, func) end @doc """ Registers a handle triggering whenever this client joins a guild. `args` : `Alchemy.Guild.t` A good amount of these events fire when the client initially connects to the gateway, and don't actually represent the client joining a new guild. """ defmacro on_guild_join(func) do handle(:guild_create, func) end @doc """ Registers a handle triggering whenever a guild gets updated. `args` : `Alchemy.Guild.t` A guild gets updated for various reasons, be it a member or role edition, or something else. The guild updated with this new info will be sent to the hook. """ defmacro on_guild_update(func) do handle(:guild_update, func) end @doc """ Registers a handle triggering whenever a guild comes back online. `args` : `Alchemy.Guild.t` Sometimes due to outages, or other problems, guild may go offline. This can be checked via `guild.unavailable`. This event gets triggered whenever a guild comes back online after an outage. """ defmacro on_guild_online(func) do handle(:guild_online, func) end @doc """ Registers a handle triggering whenever the client leaves a guild. `args` : `snowflake` The id of the guild the client left gets sent to the hook. """ defmacro on_guild_leave(func) do handle(:guild_delete, func) end @doc """ Registers a handle triggering whenever a guild channel gets updated. `args` : `Alchemy.Channel.t` ## Examples ```elixir Events.on_channel_update(:foo) def foo(channel) do IO.inspect "\#{channel.name} was updated" end ``` """ defmacro on_channel_update(func) do handle(:channel_update, func) end @doc """ Registers a handle triggering whenever a user gets banned from a guild. `args` : `Alchemy.User.t, snowflake` The user, as well as the id of the guild they were banned from get passed to the hook. ## Example ```elixir Events.on_user_ban(:cancel_ban) def cancel_ban(user, guild) do Client.unban_member(guild, user.id) end ``` """ defmacro on_user_ban(func) do handle(:guild_ban, func) end @doc """ Registers a handle triggering whenever a user gets unbanned from a guild. `args` : `Alchemy.User.t, snowflake` Recieves the user struct, as well as the id of the guild from which the user has been unbanned. ## Examples ```elixir Events.on_user_unban(:reban) def reban(user, guild) do Client.ban_member(guild_id, user.id) end ``` """ defmacro on_user_unban(func) do handle(:guild_unban, func) end @doc """ Registers a handle triggering whenever a guild's emojis get updated. `args` : `[Guild.emoji], snowflake` Receives a list of the current emojis in the guild, after this event, and the id of the guild itself. """ defmacro on_emoji_update(func) do handle(:emoji_update, func) end @doc """ Registers a handle triggering whenever a guild's integrations get updated. `args` : `snowflake` Like other guild events, the info doesn't actually come through this event, but through `on_guild_update`. This hook is merely useful for reacting to the event having happened. """ defmacro on_integrations_update(func) do handle(:integrations_update, func) end @doc """ Registers a handle triggering whenever a member joins a guild. `args` : `snowflake` The information of the member doesn't actually come through this event, but through `on_guild_update`. """ defmacro on_member_join(func) do handle(:member_join, func) end @doc """ Registers a handle triggering when a member leaves a guild. `args` : `Alchemy.User.t, snowflake` Receives the user that left the guild, and the id of the guild they've left. """ defmacro on_member_leave(func) do handle(:member_leave, func) end @doc """ Registers a handle triggering when the status of a member changes in a guild. `args` : `Alchemy.Guild.Guild.member, snowflake` Receives the member that was updated, and the guild they belong to. """ defmacro on_member_update(func) do handle(:member_update, func) end @doc """ Registers a handle triggering whenever a role gets created in a guild. `args` : `Alchemy.Guild.role, snowflake` Receives the new role, as well as the id of the guild that it belongs to. """ defmacro on_role_create(func) do handle(:role_create, func) end @doc """ Registers a handle triggering whenever a role gets deleted from a guild. `args` : `snowflake, snowflake` Receives the id of the role that was deleted, and the id of the guild it was deleted from. """ defmacro on_role_delete(func) do handle(:role_delete, func) end @doc """ Registers a handle triggering whenever a message gets sent. `args` : `Alchemy.Message.t` ### Examples ```elixir use Alchemy.Events Events.on_message(:ping) def ping(msg), do: IO.inspect msg.content ``` """ defmacro on_message(func) do handle(:message_create, func) end @doc """ Registers a handle triggering whenever a message gets edited. `args` : `snowflake, snowflake` Receives the id of the message that was edited, and the channel it was edited in. """ defmacro on_message_edit(func) do handle(:message_update, func) end @doc """ Registers a handle triggering whenever a single message gets deleted. `args` : `snowflake, snowflake` Receives the id of the message that was deleted, and the channel it was deleted from. """ defmacro on_message_delete(func) do handle(:message_delete, func) end @doc """ Registers a handle triggering whenever messages get bulk deleted from a channel. `args` : `[snowflake], snowflake` Receives a list of message ids that were deleted, and the channel they were deleted from. """ defmacro on_bulk_delete(func) do handle(:message_delete_bulk, func) end @doc """ Registers a handle triggering whenever the presence of a user gets updated in a guild. `args` : `Alchemy.Presence.t` The presence struct here may be very incomplete. """ defmacro on_presence_update(func) do handle(:presence_update, func) end @doc """ Registers a handle triggering whenever a user starts typing in a channel. `args` : `snowflake, snowflake, Integer` Receives the id of the user, the channel, and a timestamp (unix seconds) of the typing event. """ defmacro on_typing(func) do handle(:typing_start, func) end @doc """ Registers a handle triggering whenever this user changes their settings. `args` : `String.t, String.t` Receives the username and avatar hash of the new settings. """ defmacro on_settings_update(func) do handle(:user_settings_update, func) end @doc """ Registers a handle triggering whenever this user changes. `args` : `Alchemy.User.t` Receives the new information for this user. """ defmacro on_user_update(func) do handle(:user_update, func) end @doc """ Registers a handle triggering whenever someone leaves / joins a voice channel. `args` : `Alchemy.Voice.state` Receives the corresponding voice state. """ defmacro on_voice_update(func) do handle(:voice_state_update, func) end @doc """ Registers a handle triggering whenever a shard receives a READY event. This event gets sent after a shard connects with the gateway, filling the cache with info about the guilds the bot is in. `args` : `Integer`, `Integer` Receives the shard number (starting at 0), and the total amount of shards. After this event has been received, most of the information in the cache should be failed. """ defmacro on_ready(func) do handle(:ready, func) end @doc """ Registers a handle triggering whenever a shard receives a member chunk. This event gets sent after a shard has requested offline guild member info for a guild. `args` : `snowflake`, `[Alchemy.Guild.GuildMember]` Receives the id of the guild the members are from, and a list of members loaded. """ defmacro on_member_chunk(func) do handle(:member_chunk, func) end @doc false # Requires and aliases this module, as well as adds a @handles attribute, # necessary to use the other macros defmacro __using__(_opts) do quote do alias Alchemy.Events require Events @handles [] @before_compile Events end end @doc false # For every handle in the attribute, a handler is added to the EventManager defmacro __before_compile__(_env) do quote do defmacro __using__(_opts) do for handle <- @handles do quote do Alchemy.Cogs.EventHandler.add_handler(unquote(handle)) end end end end end @doc false # This is useful in a few places, converts "aliases" made here, into the internal # event def convert_type(type) do case type do :DM_channel_create -> :dm_channel_create :DM_channel_delete -> :dm_channel_delete :guild_join -> :guild_create :user_ban -> :guild_ban :user_unban -> :guild_unban :message_edit -> :message_update :bulk_delete -> :message_delete_bulk :typing -> :typing_start :settings_update -> :user_settings_update :voice_update -> :voice_state_update x -> x end end end
lib/events.ex
0.886297
0.762645
events.ex
starcoder
defmodule ChallengeGov.GovDelivery.Implementation do @moduledoc """ Implementation details for GovDelivery We never actually care about the return values Everything is best effort to maintain the GovDelivery state """ @behaviour ChallengeGov.GovDelivery import SweetXml import Phoenix.View alias ChallengeGov.Challenges alias ChallengeGov.GovDelivery alias Web.Endpoint alias Web.Router.Helpers, as: Routes @impl ChallengeGov.GovDelivery def remove_topic(challenge) do endpoint = challenge.id |> code() |> GovDelivery.remove_topic_endpoint() response = Mojito.delete( endpoint, [auth_headers()] ) case response do {:ok, %{status_code: 200}} -> Challenges.clear_gov_delivery_topic(challenge) {:ok, :removed} {:ok, %{body: body, status_code: code}} -> {:error, %{body: body, status_code: code}} e -> {:error, e} end end @impl ChallengeGov.GovDelivery def add_topic(challenge) do body = xml_topic_from_challenge(challenge) response = Mojito.post( GovDelivery.create_topic_endpoint(), [ auth_headers(), {"content-type", "application/xml; charset: utf-8"} ], body ) case response do {:ok, %{status_code: 200}} -> Challenges.store_gov_delivery_topic(challenge, code(challenge.id)) set_category(challenge) {:ok, %{body: body, status_code: code}} -> {:error, %{body: body, status_code: code}} e -> {:error, e} end end @impl ChallengeGov.GovDelivery def subscribe_user_general(user) do body = xml_subscribe_general(user) response = Mojito.post( GovDelivery.subscribe_endpoint(), [ auth_headers(), {"content-type", "application/xml; charset: utf-8"} ], body ) case response do {:ok, %{status_code: 200}} -> {:ok, :subscribed} {:ok, %{body: body, status_code: code}} -> {:error, %{body: body, status_code: code}} e -> {:error, e} end end @impl ChallengeGov.GovDelivery def subscribe_user_challenge(user, challenge) do body = xml_subscribe_challenge(user, challenge) response = Mojito.post( GovDelivery.subscribe_endpoint(), [ auth_headers(), {"content-type", "application/xml; charset: utf-8"} ], body ) case response do {:ok, %{status_code: 200}} -> {:ok, :subscribed} {:ok, %{body: body, status_code: code}} -> {:error, %{body: body, status_code: code}} e -> {:error, e} end end @impl ChallengeGov.GovDelivery def send_bulletin(challenge, subject, body) do body = xml_send_bulletin(challenge, subject, body) response = Mojito.post( GovDelivery.send_bulletin_endpoint(), [ auth_headers(), {"content-type", "application/xml; charset: utf-8"} ], body ) case response do {:ok, %{status_code: 200}} -> {:ok, :sent} {:ok, %{body: body, status_code: code}} -> {:send_error, %{body: body, status_code: code}} e -> {:send_error, e} end end @impl ChallengeGov.GovDelivery def get_topic_subscribe_count(challenge) do response = Mojito.get( GovDelivery.topic_details_endpoint(code(challenge.id)), [ auth_headers(), {"content-type", "application/xml; charset: utf-8"} ] ) case response do {:ok, %{status_code: 200, body: body}} -> result = body |> xpath(~x"//topic/subscribers-count/text()") |> to_string() {:ok, parse_count_result(result)} {:ok, %{body: body, status_code: code}} -> {:error, %{body: body, status_code: code}} e -> {:error, e} end end def set_category(challenge) do endpoint = challenge.id |> code() |> GovDelivery.set_topic_categories_endpoint() response = Mojito.put( endpoint, [ auth_headers(), {"content-type", "application/xml; charset: utf-8"} ], xml_categories_for_challenge() ) case response do {:ok, %{status_code: 200}} -> {:ok, :added} {:ok, %{body: body, status_code: code}} -> {:category_error, %{body: body, status_code: code}} e -> {:category_error, e} end end defp auth_headers() do Mojito.Headers.auth_header( GovDelivery.username(), GovDelivery.password() ) end defp parse_count_result(nil), do: 0 defp parse_count_result(string) do case Integer.parse(string) do :error -> 0 {num, _remain} -> num end end defp xml_topic_from_challenge(challenge) do elements = [ {:code, nil, code(challenge.id)}, {:name, nil, challenge.title}, {"short-name", nil, challenge.title}, {:description, nil, challenge.tagline} ] XmlBuilder.generate({:topic, nil, elements}, format: :none, encoding: "UTF-8") end defp xml_categories_for_challenge() do elements = [ {:categories, %{type: "array"}, categories()} ] XmlBuilder.generate({:topic, nil, elements}, format: :none, encoding: "UTF-8") end defp categories() do [ {:category, nil, [ { :code, nil, GovDelivery.challenge_topic_category_code() } ]} ] end defp xml_subscribe_general(user) do general_topic = [ { :topic, nil, [ {:code, nil, GovDelivery.news_topic_code()} ] } ] elements = [ {:email, nil, user.email}, {"send-notifications", %{type: "boolean"}, "true"}, {:topics, %{type: "array"}, general_topic} ] XmlBuilder.generate({:subscriber, nil, elements}, format: :none, encoding: "UTF-8") end defp xml_subscribe_challenge(user, challenge) do general_topic = [ { :topic, nil, [ {:code, nil, code(challenge.id)} ] } ] elements = [ {:email, nil, user.email}, {"send-notifications", %{type: "boolean"}, "true"}, {:topics, %{type: "array"}, general_topic} ] XmlBuilder.generate({:subscriber, nil, elements}, format: :none, encoding: "UTF-8") end defp xml_send_bulletin(challenge, subject, body) do challenge_topic = [ { :topic, nil, [ {:code, nil, code(challenge.id)} ] } ] header_img = """ <img src="#{Routes.static_url(Endpoint, "/images/email-header.png")}" alt="Challenge.Gov logo" title="Challenge.Gov logo"/> """ customized_body = render_to_string(Web.BulletinView, "body.html", body: body) elements = [ {:header, nil, {:cdata, header_img}}, {:subject, nil, subject}, {:body, nil, {:cdata, customized_body}}, {:topics, %{type: "array"}, challenge_topic} ] XmlBuilder.generate({:bulletin, nil, elements}, format: :none, encoding: "UTF-8") end defp code(id) do "#{GovDelivery.challenge_topic_prefix_code()}-#{id}" end end
lib/challenge_gov/gov_delivery/implementation.ex
0.584271
0.416559
implementation.ex
starcoder
defmodule WechatPay.App do @moduledoc """ The **App** payment method. [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=8_1) ## Example Set up a client: ```elixir {:ok, client} = WechatPay.Client.new( app_id: "the-app_id", mch_id: "the-mch-id", api_key: "the-api_key", ssl: [ ca_cert: File.read!("fixture/certs/rootca.pem"), cert: File.read!("fixture/certs/apiclient_cert.pem"), key: File.read!("fixture/certs/apiclient_key.pem") ] ) ``` Place an order: ```elixir WechatPay.App.place_order(client, %{ body: "Plan 1", out_trade_no: "12345", fee_type: "CNY", total_fee: "600", spbill_create_ip: Void.Utils.get_system_ip(), notify_url: "http://example.com/", trade_type: "APP", product_id: "12345" }) ``` """ alias WechatPay.Utils.NonceStr alias WechatPay.Utils.Signature alias WechatPay.Client alias WechatPay.API @doc """ Place an order [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_1) """ @spec place_order(Client.t(), map, keyword) :: {:ok, map} | {:error, WechatPay.Error.t() | HTTPoison.Error.t()} defdelegate place_order(client, attrs, options \\ []), to: API @doc """ Query the order [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_2&index=4) """ @spec query_order(Client.t(), map, keyword) :: {:ok, map} | {:error, WechatPay.Error.t() | HTTPoison.Error.t()} defdelegate query_order(client, attrs, options \\ []), to: API @doc """ Close the order [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_3&index=5) """ @spec close_order(Client.t(), map, keyword) :: {:ok, map} | {:error, WechatPay.Error.t() | HTTPoison.Error.t()} defdelegate close_order(client, attrs, options \\ []), to: API @doc """ Request to refund [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_4&index=6) """ @spec refund(Client.t(), map, keyword) :: {:ok, map} | {:error, WechatPay.Error.t() | HTTPoison.Error.t()} defdelegate refund(client, attrs, options \\ []), to: API @doc """ Query the refund [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_5&index=7) """ @spec query_refund(Client.t(), map, keyword) :: {:ok, map} | {:error, WechatPay.Error.t() | HTTPoison.Error.t()} defdelegate query_refund(client, attrs, options \\ []), to: API @doc """ Download bill [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_6&index=8) """ @spec download_bill(Client.t(), map, keyword) :: {:ok, String.t()} | {:error, HTTPoison.Error.t()} defdelegate download_bill(client, attrs, options \\ []), to: API @doc """ Download fund flow [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_18&index=9) """ @spec download_fund_flow(Client.t(), map, keyword) :: {:ok, String.t()} | {:error, HTTPoison.Error.t()} defdelegate download_fund_flow(client, attrs, options \\ []), to: API @doc """ Report [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_8&index=9) """ @spec report(Client.t(), map, keyword) :: {:ok, map} | {:error, WechatPay.Error.t() | HTTPoison.Error.t()} defdelegate report(client, attrs, options \\ []), to: API @doc """ Query comments in a batch [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_99&index=12) """ @spec batch_query_comments(Client.t(), map, keyword) :: {:ok, String.t()} | {:error, HTTPoison.Error.t()} defdelegate batch_query_comments(client, attrs, options \\ []), to: API @doc """ Generate pay request info, which is required for the App SDK [Official document](https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_12&index=2) """ @spec generate_pay_request(Client.t(), String.t()) :: map def generate_pay_request(client, prepay_id) do data = %{ "appid" => client.app_id, "partnerid" => client.mch_id, "prepayid" => prepay_id, "package" => "Sign=WXPay", "noncestr" => NonceStr.generate(), "timestamp" => :os.system_time(:seconds) } data |> Map.merge(%{"sign" => Signature.sign(data, client.api_key, client.sign_type)}) end end
lib/wechat_pay/payment_methods/app.ex
0.755005
0.602354
app.ex
starcoder
defmodule UrbitEx.Terminal do alias UrbitEx.{Utils, API, Actions} ## error messages in dojo can be accessed through a completely separate SSE pipeline ## at GET "/~_~/slog" ## which inputs plaintext errors @moduledoc """ Module with functions to interact with `herm`, the Urbit virtual terminal. Subscription to herm will trigger an SSE pipeline outputting the return of every statement. Error messages are not shown in that channel, instead there is a separate SSE pipeline called `slog`. You can access that by running the `slog/1` function. """ @doc """ Subscribes to `herm` in order to receive SSE events from it. Takes a Session struct and a Channel struct. Returns `:ok` """ def subscribe(session, channel) do sub = %{app: "herm", path: "/session/"} API.subscribe(session, channel, [sub]) :ok end @doc """ Opens a SSE pipeline to receive unprompted Terminal logs and error messages. """ def slog(session) do headers = %{ "Connection" => "keep-alive", "Accept" => "text/event-stream", "Cookie" => session.cookie, "Cache-Control" => "no-cache", "User-Agent" => "UrbitEx" } sse_options = [stream_to: self(), recv_timeout: :infinity] HTTPoison.get(session.url<>"/~_~/slog", headers, sse_options) end @doc """ Sends a string to the terminal. Note this only types the string in, it does not enter the command. Takes a Session Struct, a Channel struct and the string to send. Returns an {:ok, %HTTPoison.Response{}} tuple. """ def send_string(session, channel, string), do: poke(session, channel, %{txt: [string]}) @doc """ Send a backspace to the terminal. Takes a Session Struct and a Channel struct. Returns an {:ok, %HTTPoison.Response{}} tuple. """ def backspace(session, channel), do: poke(session, channel, %{bac: nil}) @doc """ Send a delete keystroke (deleting the key next to the cursor) to the terminal. Takes a Session Struct and a Channel struct. Returns an {:ok, %HTTPoison.Response{}} tuple. """ def delete(session, channel), do: poke(session, channel, %{del: nil}) @doc """ Send an enter key stroke to the terminal, running the command in the prompt. Takes a Session Struct and a Channel struct. Returns an {:ok, %HTTPoison.Response{}} tuple. """ def return(session, channel), do: poke(session, channel, %{ret: nil}) @doc """ Send an arrow keystroke to the terminal. Takes a Session Struct, a Channel struct and the arrow to send, in atom form (`:up`, `:down`, `:left` or `:right`) Returns an {:ok, %HTTPoison.Response{}} tuple. """ def arrow(session, channel, arrow) do aro = case arrow do :up -> "u" :down -> "d" :left -> "l" :right -> "r" end poke(session, channel, %{aro: aro}) end @doc """ Send an keystroke with the "control" key pressed, to the terminal. Takes a Session Struct, a Channel struct and the key to send. For what it's worth, a tab is equivalent to CTRL+ "i". It gives a lists of functions in the hoon standard library. Returns an {:ok, %HTTPoison.Response{}} tuple. """ def mod(session, channel, modifier) do # tab is "i" poke(session, channel, %{ctl: modifier}) end @doc """ Sends a `|hi` to an Urbit ship. Useful command to troubleshoot network connections (akin to ping in Unix). Takes a Session Struct, a Channel struct and the Urbit @p to ping. Returns an {:ok, %HTTPoison.Response{}} tuple. """ def hi(session, channel, target) do patp = Utils.add_tilde(target) send_string(session, channel, "|hi #{patp}") return(session, channel) end defp poke(session, channel, json) do body = Actions.poke(session.ship, "herm", "belt", json) API.wrap_put(session, channel, [body]) end end
lib/api/gall/herm.ex
0.824037
0.541348
herm.ex
starcoder
defmodule Asteroid.OAuth2.Scope do @moduledoc """ Scope helper functions and default callbacks """ alias OAuth2Utils.Scope alias Asteroid.Context alias Asteroid.OAuth2 import Asteroid.Utils defmodule UnknownRequestedScopeError do @moduledoc """ Error return when an unknown scope has been reuqested """ defexception [:unknown_scopes] @type t :: %__MODULE__{ unknown_scopes: Scope.Set.t() } def message(%{unknown_scopes: unknown_scopes}) do case astrenv(:api_error_response_verbosity) do :debug -> "Unknown requested scope(s)" <> if unknown_scopes do " (#{Enum.join(unknown_scopes, " ")})" else "" end :normal -> "Unknown requested scope(s)" :minimal -> "" end end end @typedoc """ Individual scope configuration keys The processing rules are: - `:auto`: the scope will automatically be granted, even when not requested - `:advertise`: determine whether the scope is advertised on the `/.well-nown` URIs. Defaults to `true`. If set in a incoherent way within different flows, the behaviour is unspecified. - `:display`: in *web flows*, display that scope to the end-user for authorization. When not present, shall be treated as `true` - `:optional`: in *web flows*, make that scope optional, so that the user can deselect it even when this was requested by the client. When not present, shall be treated as `false` - `:label`: a map of internationalised labels of the scope, that will be displayed to the end-user. The map keys are ISO639 tags, and the values the internationalised text of the label - `:acceptable_acrs`: a list of ACRs for use in OIDC flows. When present, Asteroid will set the preferred ACR in the `t:AsteroiWeb.AuthorizeController.Request.t/0` to a value that satisfies this requirement if the scope is requested. It will return an error otherwise - `:max_refresh_token_lifetime`: *when present*, restricts the lifetime of a refresh token released when that scope is granted. This *supersedes global*, flow or client refresh token lifetime configuration - `:max_access_token_lifetime`: *when present*, restricts the lifetime of an access token released when that scope is granted. This *supersedes global*, flow or client acess token lifetime configuration """ @type scope_config_option_individual_scope_configuration :: {:auto, boolean()} | {:display, boolean()} | {:optional, boolean()} | {:label, %{required(String.t()) => String.t()}} | {:acceptable_acrs, [Asteroid.OIDC.acr()]} | {:max_refresh_token_lifetime, non_neg_integer()} | {:max_access_token_lifetime, non_neg_integer()} @typedoc """ Scope configuration option type """ @type scope_config_option :: [ {:scopes, %{required(String.t()) => [scope_config_option_individual_scope_configuration()]}} ] @doc """ Returns the merged scope configuration for a flow Scope configuration is merge at the key level of a individual scope configuration. """ @spec configuration_for_flow(OAuth2.flow()) :: scope_config_option() def configuration_for_flow(flow) when flow in [ :ropc, :client_credentials, :authorization_code, :implicit, :device_authorization, :oidc_authorization_code, :oidc_implicit, :oidc_hybrid ] do scope_config = astrenv(:scope_config) oauth2_scope_config = astrenv(:oauth2_scope_config) oauth2_flow_scope_config = case flow do :ropc -> astrenv(:oauth2_flow_ropc_scope_config, []) :client_credentials -> astrenv(:oauth2_flow_client_credentials_scope_config, []) :authorization_code -> astrenv(:oauth2_flow_authorization_code_scope_config, []) :implicit -> astrenv(:oauth2_flow_implicit_scope_config, []) :device_authorization -> astrenv(:oauth2_flow_device_authorization_scope_config, []) :oidc_authorization_code -> astrenv(:oidc_flow_authorization_code_scope_config, []) :oidc_implicit -> astrenv(:oidc_flow_implicit_scope_config, []) :oidc_hybrid -> astrenv(:oidc_flow_hybrid_scope_config, []) end merged_individual_scope_config = Enum.reduce( [scope_config, oauth2_scope_config, oauth2_flow_scope_config], %{}, fn conf, acc -> individual_scope_config = conf[:scopes] || %{} Map.merge(acc, individual_scope_config) end ) [scopes: merged_individual_scope_config] end @doc """ Given a set of scopes and a `t:scope_config_option/0`, returns the max refresh token lifetime or `nil` if not present """ @spec max_refresh_token_lifetime(Scope.Set.t(), scope_config_option()) :: non_neg_integer() | nil def max_refresh_token_lifetime(scopes, scope_config_option) do Enum.reduce( scopes, [], fn scope, acc -> case scope_config_option[:scopes][scope][:max_refresh_token_lifetime] do lifetime when is_integer(lifetime) -> acc ++ [lifetime] nil -> acc end end ) |> Enum.max(fn -> nil end) end @doc """ Given a set of scopes and a `t:scope_config_option/0`, returns the max access token lifetime or `nil` if not present """ @spec max_access_token_lifetime(Scope.Set.t(), scope_config_option()) :: non_neg_integer() | nil def max_access_token_lifetime(scopes, scope_config_option) do Enum.reduce( scopes, [], fn scope, acc -> case scope_config_option[:scopes][scope][:max_access_token_lifetime] do lifetime when is_integer(lifetime) -> acc ++ [lifetime] nil -> acc end end ) |> Enum.max(fn -> nil end) end @doc """ Returns the scopes available to a flow """ @spec scopes_for_flow(OAuth2.flow()) :: Scope.Set.t() def scopes_for_flow(flow) when flow in [ :ropc, :client_credentials, :authorization_code, :implicit, :device_authorization, :oidc_authorization_code, :oidc_implicit, :oidc_hybrid ] do Enum.reduce( configuration_for_flow(flow)[:scopes] || %{}, Scope.Set.new(), fn {scope, _}, acc -> Scope.Set.put(acc, scope) end ) end @doc """ Returns `:ok` if the scopes are enabled for the given flow, false otherwise """ @spec scopes_enabled?(Scope.Set.t(), OAuth2.flow()) :: :ok | {:error, %UnknownRequestedScopeError{}} def scopes_enabled?(scopes, flow) do enabled_scopes_for_flow = scopes_for_flow(flow) if Scope.Set.subset?(scopes, enabled_scopes_for_flow) do :ok else {:error, UnknownRequestedScopeError.exception( unknown_scopes: Scope.Set.difference(scopes, enabled_scopes_for_flow) )} end end @doc """ Computes scopes to grant during requests Note that the list of scopes allowed for a client is directly configured in the client's attribute repository. ## ROPC flow The functions adds the scopes marked as `auto: true` in accordance to the #{Asteroid.Config.link_to_option(:oauth2_flow_ropc_scope_config)} configuration option, only during the initial request (when the username and password parameters are provided). On further token renewal requests the released scopes are the ones requested and already granted during the initial request, or a subset of them. ## Client credentials flow The functions adds the scopes marked as `auto: true` in accordance to the #{Asteroid.Config.link_to_option(:oauth2_flow_client_credentials_scope_config)} configuration option, only during the initial request. On further token renewal requests the released scopes are the ones requested and already granted during the initial request, or a subset of them, although you should probably not use refresh tokens in such a flow. ## Authorization code flow The functions adds the scopes marked as `auto: true` in accordance to the #{Asteroid.Config.link_to_option(:oauth2_flow_authorization_code_scope_config)} configuration option when the web flow on the `/authorize` endpoint successfully concludes. ## Implicit flow The functions adds the scopes marked as `auto: true` in accordance to the #{Asteroid.Config.link_to_option(:oauth2_flow_implicit_scope_config)} configuration option when the web flow on the `/authorize` endpoint successfully concludes. ## Device authorization flow During the initial phase of the flow, when the client requests a device code on the `/api/oauth2/device_authorization` endpoint, this function does not change the scopes. The functions adds the scopes marked as `auto: true` in accordance to the #{Asteroid.Config.link_to_option(:oauth2_flow_device_authorization_scope_config)} configuration option when the web flow on the `/device` endpoint successfully concludes. """ @spec grant_for_flow(Scope.Set.t(), Context.t()) :: Scope.Set.t() def grant_for_flow(scopes, %{flow: :ropc, grant_type: :password}) do Enum.reduce( astrenv(:oauth2_flow_ropc_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{flow: :client_credentials, grant_type: :client_credentials}) do Enum.reduce( astrenv(:oauth2_flow_client_credentials_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{endpoint: :authorize, flow: :authorization_code}) do Enum.reduce( astrenv(:oauth2_flow_authorization_code_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{endpoint: :authorize, flow: :implicit}) do Enum.reduce( astrenv(:oauth2_flow_implicit_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{flow: :device_authorization, endpoint: :device_authorization}) do scopes end def grant_for_flow(scopes, %{flow: :device_authorization, endpoint: :device}) do Enum.reduce( astrenv(:oauth2_flow_device_authorization_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{endpoint: :authorize, flow: :oidc_authorization_code}) do Enum.reduce( astrenv(:oidc_flow_authorization_code_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{endpoint: :authorize, flow: :oidc_implicit}) do Enum.reduce( astrenv(:oidc_flow_implicit_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end def grant_for_flow(scopes, %{endpoint: :authorize, flow: :oidc_hybrid}) do Enum.reduce( astrenv(:oidc_flow_hybrid_scope_config) || [], scopes, fn {scope, scope_config}, acc -> if scope_config[:auto] do Scope.Set.put(acc, scope) else acc end end ) end end
lib/asteroid/oauth2/scope.ex
0.872646
0.489992
scope.ex
starcoder
defmodule Plausible.Stats do use Plausible.Repo alias Plausible.Stats.Query def compare_pageviews_and_visitors(site, query, {pageviews, visitors}) do query = Query.shift_back(query) {old_pageviews, old_visitors} = pageviews_and_visitors(site, query) cond do old_pageviews == 0 and pageviews > 0 -> {100, 100} old_pageviews == 0 and pageviews == 0 -> {0, 0} true -> { round((pageviews - old_pageviews) / old_pageviews * 100), round((visitors - old_visitors) / old_visitors * 100), } end end def calculate_plot(site, %Query{step_type: "month"} = query) do steps = Enum.map((query.steps - 1)..0, fn shift -> Timex.now(site.timezone) |> Timex.beginning_of_month |> Timex.shift(months: -shift) |> DateTime.to_date end) groups = Repo.all( from e in base_query(site, %{query | filters: %{}}), group_by: 1, order_by: 1, select: {fragment("date_trunc('month', ? at time zone 'utc' at time zone ?)", e.timestamp, ^site.timezone), count(e.fingerprint, :distinct)} ) |> Enum.into(%{}) |> transform_keys(fn dt -> NaiveDateTime.to_date(dt) end) compare_groups = if query.filters["goal"] do Repo.all( from e in base_query(site, query), group_by: 1, order_by: 1, select: {fragment("date_trunc('month', ? at time zone 'utc' at time zone ?)", e.timestamp, ^site.timezone), count(e.fingerprint, :distinct)} ) |> Enum.into(%{}) |> transform_keys(fn dt -> NaiveDateTime.to_date(dt) end) end present_index = Enum.find_index(steps, fn step -> step == Timex.now(site.timezone) |> Timex.to_date |> Timex.beginning_of_month end) plot = Enum.map(steps, fn step -> groups[step] || 0 end) compare_plot = compare_groups && Enum.map(steps, fn step -> compare_groups[step] || 0 end) labels = Enum.map(steps, fn step -> Timex.format!(step, "{ISOdate}") end) {plot, compare_plot, labels, present_index} end def calculate_plot(site, %Query{step_type: "date"} = query) do steps = Enum.into(query.date_range, []) groups = Repo.all( from e in base_query(site, %{ query | filters: %{} }), group_by: 1, order_by: 1, select: {fragment("date_trunc('day', ? at time zone 'utc' at time zone ?)", e.timestamp, ^site.timezone), count(e.fingerprint, :distinct)} ) |> Enum.into(%{}) |> transform_keys(fn dt -> NaiveDateTime.to_date(dt) end) compare_groups = if query.filters["goal"] do Repo.all( from e in base_query(site, query), group_by: 1, order_by: 1, select: {fragment("date_trunc('day', ? at time zone 'utc' at time zone ?)", e.timestamp, ^site.timezone), count(e.fingerprint, :distinct)} ) |> Enum.into(%{}) |> transform_keys(fn dt -> NaiveDateTime.to_date(dt) end) end present_index = Enum.find_index(steps, fn step -> step == Timex.now(site.timezone) |> Timex.to_date end) steps_to_show = if present_index, do: present_index + 1, else: Enum.count(steps) plot = Enum.map(steps, fn step -> groups[step] || 0 end) |> Enum.take(steps_to_show) compare_plot = compare_groups && Enum.map(steps, fn step -> compare_groups[step] || 0 end) labels = Enum.map(steps, fn step -> Timex.format!(step, "{ISOdate}") end) {plot, compare_plot, labels, present_index} end def calculate_plot(site, %Query{step_type: "hour"} = query) do {:ok, beginning_of_day} = NaiveDateTime.new(query.date_range.first, ~T[00:00:00]) steps = Enum.map(0..23, fn shift -> beginning_of_day |> Timex.shift(hours: shift) |> truncate_to_hour |> NaiveDateTime.truncate(:second) end) groups = Repo.all( from e in base_query(site, %{query | filters: %{}}), group_by: 1, order_by: 1, select: {fragment("date_trunc('hour', ? at time zone 'utc' at time zone ?)", e.timestamp, ^site.timezone), count(e.fingerprint, :distinct)} ) |> Enum.into(%{}) |> transform_keys(fn dt -> NaiveDateTime.truncate(dt, :second) end) compare_groups = if query.filters["goal"] do Repo.all( from e in base_query(site, query), group_by: 1, order_by: 1, select: {fragment("date_trunc('hour', ? at time zone 'utc' at time zone ?)", e.timestamp, ^site.timezone), count(e.fingerprint, :distinct)} ) |> Enum.into(%{}) |> transform_keys(fn dt -> NaiveDateTime.truncate(dt, :second) end) end present_index = Enum.find_index(steps, fn step -> step == Timex.now(site.timezone) |> truncate_to_hour |> NaiveDateTime.truncate(:second) end) steps_to_show = if present_index, do: present_index + 1, else: Enum.count(steps) plot = Enum.map(steps, fn step -> groups[step] || 0 end) |> Enum.take(steps_to_show) compare_plot = compare_groups && Enum.map(steps, fn step -> compare_groups[step] || 0 end) labels = Enum.map(steps, fn step -> NaiveDateTime.to_iso8601(step) end) {plot, compare_plot, labels, present_index} end def bounce_rate(site, query) do {first_datetime, last_datetime} = date_range_utc_boundaries(query.date_range, site.timezone) sessions_query = from(s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime ) total_sessions = Repo.one( from s in sessions_query, select: count(s)) bounced_sessions = Repo.one(from s in sessions_query, where: s.is_bounce, select: count(s)) case total_sessions do 0 -> 0 total -> round(bounced_sessions / total * 100) end end def pageviews_and_visitors(site, query) do Repo.one(from( e in base_query(site, query), select: {count(e.id), count(e.fingerprint, :distinct)} )) end def unique_visitors(site, query) do Repo.one(from( e in base_query(site, query), select: count(e.fingerprint, :distinct) )) end def top_referrers_for_goal(site, query, limit \\ 5) do Repo.all(from e in base_query(site, query), select: %{name: e.initial_referrer_source, url: min(e.initial_referrer), count: count(e.fingerprint, :distinct)}, group_by: e.initial_referrer_source, where: not is_nil(e.initial_referrer_source), order_by: [desc: 3], limit: ^limit ) |> Enum.map(fn ref -> Map.update(ref, :url, nil, fn url -> url && URI.parse("http://" <> url).host end) end) end def top_referrers(site, query, limit \\ 5, include \\ []) do referrers = Repo.all(from e in base_query(site, query), select: %{name: e.referrer_source, url: min(e.referrer), count: count(e.fingerprint, :distinct)}, group_by: e.referrer_source, where: not is_nil(e.referrer_source), order_by: [desc: 3], limit: ^limit ) |> Enum.map(fn ref -> Map.update(ref, :url, nil, fn url -> url && URI.parse("http://" <> url).host end) end) if "bounce_rate" in include do bounce_rates = bounce_rates_by_referrer_source(site, query, Enum.map(referrers, fn ref -> ref[:name] end)) Enum.map(referrers, fn referrer -> Map.put(referrer, :bounce_rate, bounce_rates[referrer[:name]]) end) else referrers end end defp bounce_rates_by_referrer_source(site, query, referrers) do {first_datetime, last_datetime} = date_range_utc_boundaries(query.date_range, site.timezone) total_sessions_by_referrer = Repo.all( from s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime, where: s.referrer_source in ^referrers, group_by: s.referrer_source, select: {s.referrer_source, count(s.id)} ) |> Enum.into(%{}) bounced_sessions_by_referrer = Repo.all( from s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime, where: s.is_bounce, where: s.referrer_source in ^referrers, group_by: s.referrer_source, select: {s.referrer_source, count(s.id)} ) |> Enum.into(%{}) Enum.reduce(referrers, %{}, fn referrer, acc -> total_sessions = Map.get(total_sessions_by_referrer, referrer, 0) bounced_sessions = Map.get(bounced_sessions_by_referrer, referrer, 0) bounce_rate = if total_sessions > 0 do round(bounced_sessions / total_sessions * 100) end Map.put(acc, referrer, bounce_rate) end) end def visitors_from_referrer(site, query, referrer) do Repo.one( from e in base_query(site, query), select: count(e.fingerprint, :distinct), where: e.referrer_source == ^referrer ) end def conversions_from_referrer(site, query, referrer) do Repo.one( from e in base_query(site, query), select: count(e.fingerprint, :distinct), where: e.initial_referrer_source == ^referrer ) end def referrer_drilldown(site, query, referrer, include \\ []) do referring_urls = Repo.all( from e in base_query(site, query), select: %{name: e.referrer, count: count(e.fingerprint, :distinct)}, group_by: e.referrer, where: e.referrer_source == ^referrer, order_by: [desc: 2], limit: 100 ) referring_urls = if "bounce_rate" in include do bounce_rates = bounce_rates_by_referring_url(site, query, Enum.map(referring_urls, fn ref -> ref[:name] end)) Enum.map(referring_urls, fn url -> Map.put(url, :bounce_rate, bounce_rates[url[:name]]) end) else referring_urls end if referrer == "Twitter" do urls = Enum.map(referring_urls, &(&1[:name])) tweets = Repo.all( from t in Plausible.Twitter.Tweet, where: t.link in ^urls ) |> Enum.group_by(&(&1.link)) Enum.map(referring_urls, fn url -> Map.put(url, :tweets, tweets[url[:name]]) end) else referring_urls end end def referrer_drilldown_for_goal(site, query, referrer) do Repo.all( from e in base_query(site, query), select: %{name: e.initial_referrer, count: count(e.fingerprint, :distinct)}, group_by: e.initial_referrer, where: e.initial_referrer_source == ^referrer, order_by: [desc: 2], limit: 100 ) end defp bounce_rates_by_referring_url(site, query, referring_urls) do {first_datetime, last_datetime} = date_range_utc_boundaries(query.date_range, site.timezone) total_sessions_by_url = Repo.all( from s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime, where: s.referrer in ^referring_urls, group_by: s.referrer, select: {s.referrer, count(s.id)} ) |> Enum.into(%{}) bounced_sessions_by_url = Repo.all( from s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime, where: s.is_bounce, where: s.referrer in ^referring_urls, group_by: s.referrer, select: {s.referrer, count(s.id)} ) |> Enum.into(%{}) Enum.reduce(referring_urls, %{}, fn url, acc -> total_sessions = Map.get(total_sessions_by_url, url, 0) bounced_sessions = Map.get(bounced_sessions_by_url, url, 0) bounce_rate = if total_sessions > 0 do round(bounced_sessions / total_sessions * 100) end Map.put(acc, url, bounce_rate) end) end def top_pages(site, query, limit \\ 5, include \\ []) do pages = Repo.all(from e in base_query(site, query), select: %{name: e.pathname, count: count(e.pathname)}, group_by: e.pathname, order_by: [desc: count(e.pathname)], limit: ^limit ) if "bounce_rate" in include do bounce_rates = bounce_rates_by_page_url(site, query, Enum.map(pages, fn page -> page[:name] end)) Enum.map(pages, fn url -> Map.put(url, :bounce_rate, bounce_rates[url[:name]]) end) else pages end end defp bounce_rates_by_page_url(site, query, page_urls) do {first_datetime, last_datetime} = date_range_utc_boundaries(query.date_range, site.timezone) total_sessions_by_url = Repo.all( from s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime, where: s.entry_page in ^page_urls, group_by: s.entry_page, select: {s.entry_page, count(s.id)} ) |> Enum.into(%{}) bounced_sessions_by_url = Repo.all( from s in Plausible.FingerprintSession, where: s.domain == ^site.domain, where: s.start >= ^first_datetime and s.start < ^last_datetime, where: s.is_bounce, where: s.entry_page in ^page_urls, group_by: s.entry_page, select: {s.entry_page, count(s.id)} ) |> Enum.into(%{}) Enum.reduce(page_urls, %{}, fn url, acc -> total_sessions = Map.get(total_sessions_by_url, url, 0) bounced_sessions = Map.get(bounced_sessions_by_url, url, 0) bounce_rate = if total_sessions > 0 do round(bounced_sessions / total_sessions * 100) end Map.put(acc, url, bounce_rate) end) end defp add_percentages(stat_list) do total = Enum.reduce(stat_list, 0, fn %{count: count}, total -> total + count end) Enum.map(stat_list, fn stat -> Map.put(stat, :percentage, round(stat[:count] / total * 100)) end) end @available_screen_sizes ["Desktop", "Laptop", "Tablet", "Mobile"] def top_screen_sizes(site, query) do Repo.all(from e in base_query(site, query), select: %{name: e.screen_size, count: count(e.fingerprint, :distinct)}, group_by: e.screen_size, where: not is_nil(e.screen_size) ) |> Enum.sort(fn %{name: screen_size1}, %{name: screen_size2} -> index1 = Enum.find_index(@available_screen_sizes, fn s -> s == screen_size1 end) index2 = Enum.find_index(@available_screen_sizes, fn s -> s == screen_size2 end) index2 > index1 end) |> add_percentages end def countries(site, query) do Repo.all(from e in base_query(site, query), select: %{name: e.country_code, count: count(e.fingerprint, :distinct)}, group_by: e.country_code, where: not is_nil(e.country_code), order_by: [desc: 2] ) |> Enum.map(fn stat -> two_letter_code = stat[:name] stat |> Map.put(:name, Plausible.Stats.CountryName.to_alpha3(two_letter_code)) |> Map.put(:full_country_name, Plausible.Stats.CountryName.from_iso3166(two_letter_code)) end) |> add_percentages end def browsers(site, query, limit \\ 5) do Repo.all(from e in base_query(site, query), select: %{name: e.browser, count: count(e.fingerprint, :distinct)}, group_by: e.browser, where: not is_nil(e.browser), order_by: [desc: 2] ) |> add_percentages |> Enum.take(limit) end def operating_systems(site, query, limit \\ 5) do Repo.all(from e in base_query(site, query), select: %{name: e.operating_system, count: count(e.fingerprint, :distinct)}, group_by: e.operating_system, where: not is_nil(e.operating_system), order_by: [desc: 2] ) |> add_percentages |> Enum.take(limit) end def current_visitors(site) do Repo.one( from e in Plausible.Event, where: e.timestamp >= fragment("(now() at time zone 'utc') - '5 minutes'::interval"), where: e.domain == ^site.domain, select: count(e.fingerprint, :distinct) ) end def goal_conversions(site, %Query{filters: %{"goal" => goal}} = query) when is_binary(goal) do Repo.all(from e in base_query(site, query), select: count(e.fingerprint, :distinct), group_by: e.name, order_by: [desc: 1] ) |> Enum.map(fn count -> %{name: goal, count: count} end) end def goal_conversions(site, query) do goals = Repo.all(from g in Plausible.Goal, where: g.domain == ^site.domain) fetch_pageview_goals(goals, site, query) ++ fetch_event_goals(goals, site, query) |> sort_conversions() end defp fetch_event_goals(goals, site, query) do events = Enum.map(goals, fn goal -> goal.event_name end) |> Enum.filter(&(&1)) if Enum.count(events) > 0 do Repo.all( from e in base_query(site, query, events), group_by: e.name, select: %{name: e.name, count: count(e.fingerprint, :distinct)} ) else [] end end defp fetch_pageview_goals(goals, site, query) do pages = Enum.map(goals, fn goal -> goal.page_path end) |> Enum.filter(&(&1)) if Enum.count(pages) > 0 do Repo.all( from e in base_query(site, query), where: e.pathname in ^pages, group_by: e.pathname, select: %{name: fragment("concat('Visit ', ?)", e.pathname), count: count(e.fingerprint, :distinct)} ) else [] end end defp sort_conversions(conversions) do Enum.sort_by(conversions, fn conversion -> -conversion[:count] end) end defp base_query(site, query, events \\ ["pageview"]) do {first_datetime, last_datetime} = date_range_utc_boundaries(query.date_range, site.timezone) {goal_event, path} = event_name_for_goal(query) q = from(e in Plausible.Event, where: e.domain == ^site.domain, where: e.timestamp >= ^first_datetime and e.timestamp < ^last_datetime ) q = if path do from(e in q, where: e.pathname == ^path) else q end if goal_event do from(e in q, where: e.name == ^goal_event) else from(e in q, where: e.name in ^events) end end defp date_range_utc_boundaries(date_range, timezone) do {:ok, first} = NaiveDateTime.new(date_range.first, ~T[00:00:00]) first_datetime = Timex.to_datetime(first, timezone) |> Timex.Timezone.convert("UTC") {:ok, last} = NaiveDateTime.new(date_range.last |> Timex.shift(days: 1), ~T[00:00:00]) last_datetime = Timex.to_datetime(last, timezone) |> Timex.Timezone.convert("UTC") {first_datetime, last_datetime} end defp event_name_for_goal(query) do case query.filters["goal"] do "Visit " <> page -> {"pageview", page} goal when is_binary(goal) -> {goal, nil} _ -> {nil, nil} end end defp transform_keys(map, fun) do for {key, val} <- map, into: %{} do {fun.(key), val} end end defp truncate_to_hour(datetime) do {:ok, datetime} = NaiveDateTime.new(datetime.year, datetime.month, datetime.day, datetime.hour, 0, 0, 0) datetime end end
lib/plausible/stats/stats.ex
0.644673
0.423458
stats.ex
starcoder
defmodule AWS.SecurityHub do @moduledoc """ Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the * [AWS Security Hub User Guide](https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html) *. When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to. For example, if your Region is set to `us-west-2`, when you use ` `CreateMembers` ` to add a member account to Security Hub, the association of the member account with the master account is created only in the `us-west-2` Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from. The following throttling limits apply to using Security Hub API operations. <ul> <li> ` `GetFindings` ` - `RateLimit` of 3 requests per second. `BurstLimit` of 6 requests per second. </li> <li> ` `UpdateFindings` ` - `RateLimit` of 1 request per second. `BurstLimit` of 5 requests per second. </li> <li> All other operations - `RateLimit` of 10 requests per second. `BurstLimit` of 30 requests per second. </li> </ul> """ @doc """ Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from. When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account. """ def accept_invitation(client, input, options \\ []) do path_ = "/master" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Disables the standards specified by the provided `StandardsSubscriptionArns`. For more information, see [Security Standards](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards.html) section of the *AWS Security Hub User Guide*. """ def batch_disable_standards(client, input, options \\ []) do path_ = "/standards/deregister" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Enables the standards specified by the provided `StandardsArn`. To obtain the ARN for a standard, use the ` `DescribeStandards` ` operation. For more information, see the [Security Standards](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards.html) section of the *AWS Security Hub User Guide*. """ def batch_enable_standards(client, input, options \\ []) do path_ = "/standards/register" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub. The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb. After a finding is created, `BatchImportFindings` cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow. <ul> <li> `Confidence` </li> <li> `Criticality` </li> <li> `Note` </li> <li> `RelatedFindings` </li> <li> `Severity` </li> <li> `Types` </li> <li> `UserDefinedFields` </li> <li> `VerificationState` </li> <li> `Workflow` </li> </ul> """ def batch_import_findings(client, input, options \\ []) do path_ = "/findings/import" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Used by Security Hub customers to update information about their investigation into a finding. Requested by master accounts or member accounts. Master accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. Updates from `BatchUpdateFindings` do not affect the value of `UpdatedAt` for a finding. Master accounts can use `BatchUpdateFindings` to update the following finding fields and objects. <ul> <li> `Confidence` </li> <li> `Criticality` </li> <li> `Note` </li> <li> `RelatedFindings` </li> <li> `Severity` </li> <li> `Types` </li> <li> `UserDefinedFields` </li> <li> `VerificationState` </li> <li> `Workflow` </li> </ul> Member accounts can only use `BatchUpdateFindings` to update the Note object. """ def batch_update_findings(client, input, options \\ []) do path_ = "/findings/batchupdate" headers = [] query_ = [] request(client, :patch, path_, query_, headers, input, options, nil) end @doc """ Creates a custom action target in Security Hub. You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events. """ def create_action_target(client, input, options \\ []) do path_ = "/actionTargets" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Creates a custom insight in Security Hub. An insight is a consolidation of findings that relate to a security issue that requires attention or remediation. To group the related findings in the insight, use the `GroupByAttribute`. """ def create_insight(client, input, options \\ []) do path_ = "/insights" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. To enable Security Hub, you can use the ` `EnableSecurityHub` ` operation. After you use `CreateMembers` to create member account associations in Security Hub, you must use the ` `InviteMembers` ` operation to invite the accounts to enable Security Hub and become member accounts in Security Hub. If the account owner accepts the invitation, the account becomes a member account in Security Hub. A permissions policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start to be sent to both the member and master accounts. To remove the association between the master and member accounts, use the ` `DisassociateFromMasterAccount` ` or ` `DisassociateMembers` ` operation. """ def create_members(client, input, options \\ []) do path_ = "/members" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Declines invitations to become a member account. """ def decline_invitations(client, input, options \\ []) do path_ = "/invitations/decline" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Deletes a custom action target from Security Hub. Deleting a custom action target does not affect any findings or insights that were already sent to Amazon CloudWatch Events using the custom action. """ def delete_action_target(client, action_target_arn, input, options \\ []) do path_ = "/actionTargets/#{AWS.Util.encode_uri(action_target_arn, true)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes the insight specified by the `InsightArn`. """ def delete_insight(client, insight_arn, input, options \\ []) do path_ = "/insights/#{AWS.Util.encode_uri(insight_arn, true)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Deletes invitations received by the AWS account to become a member account. """ def delete_invitations(client, input, options \\ []) do path_ = "/invitations/delete" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Deletes the specified member accounts from Security Hub. """ def delete_members(client, input, options \\ []) do path_ = "/members/delete" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Returns a list of the custom action targets in Security Hub in your account. """ def describe_action_targets(client, input, options \\ []) do path_ = "/actionTargets/get" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Returns details about the Hub resource in your account, including the `HubArn` and the time when you enabled Security Hub. """ def describe_hub(client, hub_arn \\ nil, options \\ []) do path_ = "/accounts" headers = [] query_ = [] query_ = if !is_nil(hub_arn) do [{"HubArn", hub_arn} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns information about the available products that you can subscribe to and integrate with Security Hub in order to consolidate findings. """ def describe_products(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/products" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"NextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"MaxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of the available standards in Security Hub. For each standard, the results include the standard ARN, the name, and a description. """ def describe_standards(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/standards" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"NextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"MaxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of security standards controls. For each control, the results include information about whether it is currently enabled, the severity, and a link to remediation information. """ def describe_standards_controls(client, standards_subscription_arn, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/standards/controls/#{AWS.Util.encode_uri(standards_subscription_arn, true)}" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"NextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"MaxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Disables the integration of the specified product with Security Hub. After the integration is disabled, findings from that product are no longer sent to Security Hub. """ def disable_import_findings_for_product(client, product_subscription_arn, input, options \\ []) do path_ = "/productSubscriptions/#{AWS.Util.encode_uri(product_subscription_arn, true)}" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub. When you disable Security Hub for a master account, it doesn't disable Security Hub for any associated member accounts. When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your master and member account associations are removed. If you want to save your existing findings, you must export them before you disable Security Hub. """ def disable_security_hub(client, input, options \\ []) do path_ = "/accounts" headers = [] query_ = [] request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Disassociates the current Security Hub member account from the associated master account. """ def disassociate_from_master_account(client, input, options \\ []) do path_ = "/master/disassociate" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Disassociates the specified member accounts from the associated master account. """ def disassociate_members(client, input, options \\ []) do path_ = "/members/disassociate" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub. When you enable a product integration, a permissions policy that grants permission for the product to send findings to Security Hub is applied. """ def enable_import_findings_for_product(client, input, options \\ []) do path_ = "/productSubscriptions" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Enables Security Hub for your account in the current Region or the Region you specify in the request. When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from other services that are integrated with Security Hub. When you use the `EnableSecurityHub` operation to enable Security Hub, you also automatically enable the following standards. <ul> <li> CIS AWS Foundations </li> <li> AWS Foundational Security Best Practices </li> </ul> You do not enable the Payment Card Industry Data Security Standard (PCI DSS) standard. To not enable the automatically enabled standards, set `EnableDefaultStandards` to `false`. After you enable Security Hub, to enable a standard, use the ` `BatchEnableStandards` ` operation. To disable a standard, use the ` `BatchDisableStandards` ` operation. To learn more, see [Setting Up AWS Security Hub](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-settingup.html) in the *AWS Security Hub User Guide*. """ def enable_security_hub(client, input, options \\ []) do path_ = "/accounts" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Returns a list of the standards that are currently enabled. """ def get_enabled_standards(client, input, options \\ []) do path_ = "/standards/get" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Returns a list of findings that match the specified criteria. """ def get_findings(client, input, options \\ []) do path_ = "/findings" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Lists the results of the Security Hub insight specified by the insight ARN. """ def get_insight_results(client, insight_arn, options \\ []) do path_ = "/insights/results/#{AWS.Util.encode_uri(insight_arn, true)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Lists and describes insights for the specified insight ARNs. """ def get_insights(client, input, options \\ []) do path_ = "/insights/get" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation. """ def get_invitations_count(client, options \\ []) do path_ = "/invitations/count" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Provides the details for the Security Hub master account for the current member account. """ def get_master_account(client, options \\ []) do path_ = "/master" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns the details for the Security Hub member accounts for the specified account IDs. """ def get_members(client, input, options \\ []) do path_ = "/members/get" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from. Before you can use this action to invite a member, you must first use the ` `CreateMembers` ` action to create the member account in Security Hub. When the account owner accepts the invitation to become a member account and enables Security Hub, the master account can view the findings generated from the member account. """ def invite_members(client, input, options \\ []) do path_ = "/members/invite" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Lists all findings-generating solutions (products) that you are subscribed to receive findings from in Security Hub. """ def list_enabled_products_for_import(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/productSubscriptions" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"NextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"MaxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Lists all Security Hub membership invitations that were sent to the current AWS account. """ def list_invitations(client, max_results \\ nil, next_token \\ nil, options \\ []) do path_ = "/invitations" headers = [] query_ = [] query_ = if !is_nil(next_token) do [{"NextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"MaxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Lists details about all member accounts for the current Security Hub master account. """ def list_members(client, max_results \\ nil, next_token \\ nil, only_associated \\ nil, options \\ []) do path_ = "/members" headers = [] query_ = [] query_ = if !is_nil(only_associated) do [{"OnlyAssociated", only_associated} | query_] else query_ end query_ = if !is_nil(next_token) do [{"NextToken", next_token} | query_] else query_ end query_ = if !is_nil(max_results) do [{"MaxResults", max_results} | query_] else query_ end request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Returns a list of tags associated with a resource. """ def list_tags_for_resource(client, resource_arn, options \\ []) do path_ = "/tags/#{URI.encode(resource_arn)}" headers = [] query_ = [] request(client, :get, path_, query_, headers, nil, options, nil) end @doc """ Adds one or more tags to a resource. """ def tag_resource(client, resource_arn, input, options \\ []) do path_ = "/tags/#{URI.encode(resource_arn)}" headers = [] query_ = [] request(client, :post, path_, query_, headers, input, options, nil) end @doc """ Removes one or more tags from a resource. """ def untag_resource(client, resource_arn, input, options \\ []) do path_ = "/tags/#{URI.encode(resource_arn)}" headers = [] {query_, input} = [ {"TagKeys", "tagKeys"}, ] |> AWS.Request.build_params(input) request(client, :delete, path_, query_, headers, input, options, nil) end @doc """ Updates the name and description of a custom action target in Security Hub. """ def update_action_target(client, action_target_arn, input, options \\ []) do path_ = "/actionTargets/#{AWS.Util.encode_uri(action_target_arn, true)}" headers = [] query_ = [] request(client, :patch, path_, query_, headers, input, options, nil) end @doc """ `UpdateFindings` is deprecated. Instead of `UpdateFindings`, use `BatchUpdateFindings`. Updates the `Note` and `RecordState` of the Security Hub-aggregated findings that the filter attributes specify. Any member account that can view the finding also sees the update to the finding. """ def update_findings(client, input, options \\ []) do path_ = "/findings" headers = [] query_ = [] request(client, :patch, path_, query_, headers, input, options, nil) end @doc """ Updates the Security Hub insight identified by the specified insight ARN. """ def update_insight(client, insight_arn, input, options \\ []) do path_ = "/insights/#{AWS.Util.encode_uri(insight_arn, true)}" headers = [] query_ = [] request(client, :patch, path_, query_, headers, input, options, nil) end @doc """ Updates configuration options for Security Hub. """ def update_security_hub_configuration(client, input, options \\ []) do path_ = "/accounts" headers = [] query_ = [] request(client, :patch, path_, query_, headers, input, options, nil) end @doc """ Used to control whether an individual security standard control is enabled or disabled. """ def update_standards_control(client, standards_control_arn, input, options \\ []) do path_ = "/standards/control/#{AWS.Util.encode_uri(standards_control_arn, true)}" headers = [] query_ = [] request(client, :patch, path_, query_, headers, input, options, nil) end @spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) :: {:ok, Poison.Parser.t(), Poison.Response.t()} | {:error, Poison.Parser.t()} | {:error, HTTPoison.Error.t()} defp request(client, method, path, query, headers, input, options, success_status_code) do client = %{client | service: "securityhub"} host = build_host("securityhub", client) url = host |> build_url(path, client) |> add_query(query) additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}] headers = AWS.Request.add_headers(additional_headers, headers) payload = encode_payload(input) headers = AWS.Request.sign_v4(client, method, url, headers, payload) perform_request(method, url, payload, headers, options, success_status_code) end defp perform_request(method, url, payload, headers, options, nil) do case HTTPoison.request(method, url, payload, headers, options) do {:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} -> {:ok, response} {:ok, %HTTPoison.Response{status_code: status_code, body: body} = response} when status_code == 200 or status_code == 202 or status_code == 204 -> {:ok, Poison.Parser.parse!(body, %{}), response} {:ok, %HTTPoison.Response{body: body}} -> error = Poison.Parser.parse!(body, %{}) {:error, error} {:error, %HTTPoison.Error{reason: reason}} -> {:error, %HTTPoison.Error{reason: reason}} end end defp perform_request(method, url, payload, headers, options, success_status_code) do case HTTPoison.request(method, url, payload, headers, options) do {:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} -> {:ok, %{}, response} {:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} -> {:ok, Poison.Parser.parse!(body, %{}), response} {:ok, %HTTPoison.Response{body: body}} -> error = Poison.Parser.parse!(body, %{}) {:error, error} {:error, %HTTPoison.Error{reason: reason}} -> {:error, %HTTPoison.Error{reason: reason}} end end defp build_host(_endpoint_prefix, %{region: "local"}) do "localhost" end defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do "#{endpoint_prefix}.#{region}.#{endpoint}" end defp build_url(host, path, %{:proto => proto, :port => port}) do "#{proto}://#{host}:#{port}#{path}" end defp add_query(url, []) do url end defp add_query(url, query) do querystring = AWS.Util.encode_query(query) "#{url}?#{querystring}" end defp encode_payload(input) do if input != nil, do: Poison.Encoder.encode(input, %{}), else: "" end end
lib/aws/security_hub.ex
0.872741
0.764276
security_hub.ex
starcoder
defmodule CSSEx.HSLA do alias CSSEx.Unit @moduledoc """ Struct and helper functions for generating HSLA values. """ defstruct h: %Unit{value: 0, unit: nil}, s: %Unit{value: 0, unit: "%"}, l: %Unit{value: 0, unit: "%"}, a: 1 @type t() :: %CSSEx.HSLA{ h: non_neg_integer, s: non_neg_integer, l: non_neg_integer, a: non_neg_integer } @colors CSSEx.Helpers.Colors.colors_tuples() @doc """ Accepts any value in the form of a binary `"hsla(0, 10%, 20%, 0.5)"` or `"hsl(0, 10%, 20%)"`, any hexadecimal representation in binary in the form of `"#xxx"`, `"#xxxx"`, `"#xxxxxx"` or `"#xxxxxxxx"`, rgb/a as `"rgba(100,100,100,0.1)"` or `"rgb(10,20,30)"`, or any literal color name defined as web colors (CSSEx.Colors) - returns a `%CSSEx.HSLA{}` struct. """ @spec new_hsla(String.t()) :: {:ok, %CSSEx.HSLA{}} | {:error, term} def new_hsla(<<"hsla", values::binary>>) do case Regex.run(~r/\((.+),(.+),(.+),(.+)\)/, values) do [_, h, s, l, a] -> new( String.trim(h), String.trim(s), String.trim(l), String.trim(a) ) _ -> {:error, :invalid} end end def new_hsla(<<"hsl", values::binary>>) do case Regex.run(~r/\((.+),(.+),(.+)\)/, values) do [_, h, s, l] -> new( String.trim(h), String.trim(s), String.trim(l), "1" ) _ -> {:error, :invalid} end end def new_hsla(<<"rgb", _::binary>> = full) do case CSSEx.RGBA.new_rgba(full) do {:ok, rgba} -> from_rgba(rgba) error -> error end end def new_hsla(<<"#", _::binary>> = full) do case CSSEx.RGBA.new_rgba(full) do {:ok, rgba} -> from_rgba(rgba) error -> error end end Enum.each(@colors, fn [color, rgba] -> def new_hsla(unquote(color)) do case CSSEx.RGBA.new_rgba(unquote(rgba)) do {:ok, new_rgba} -> from_rgba(new_rgba) error -> error end end end) @doc """ Converts an existing `%CSSEx.RGBA{}` struct into a `%CSSEx.HSLA{}` struct. Taken from https://www.niwa.nu/2013/05/math-behind-colorspace-conversions-rgb-hsl/ """ @spec from_rgba(%CSSEx.RGBA{}) :: {:ok, %CSSEx.HSLA{}} def from_rgba(%CSSEx.RGBA{r: r, g: g, b: b, a: a}) do n_r = r / 255 n_g = g / 255 n_b = b / 255 {min, max} = Enum.min_max([n_r, n_g, n_b]) luminance = (min + max) / 2 * 100 saturation = case {min, max} do {same, same} -> 0 {min, max} when luminance <= 50 -> (max - min) / (max + min) * 100 {min, max} -> (max - min) / (2 - max - min) * 100 end hue = case {n_r, n_g, n_b} do {same, same, same} -> 0 {n_r, n_g, n_b} when n_r > n_g and n_r > n_b -> (n_g - n_b) / (max - min) {n_r, n_g, n_b} when n_g > n_r and n_g > n_b -> 2 + (n_b - n_r) / (max - min) {n_r, n_g, _n_b} -> 4 + (n_r - n_g) / (max - min) end hue_2 = case hue * 60 do new_hue when new_hue < 0 -> new_hue + 360 new_hue -> new_hue end {:ok, %__MODULE__{ h: %Unit{value: round(hue_2), unit: nil}, s: %Unit{value: round(saturation), unit: "%"}, l: %Unit{value: round(luminance), unit: "%"}, a: a }} end @doc """ Generates a `%CSSEx.HSLA{}` wrapped in an :ok tuple, from the values of h, s, l, and alpha. All values are treated as decimal. """ def new(h, s, l, a), do: { :ok, %__MODULE__{ h: new_hue(h), s: new_saturation(s), l: new_luminance(l), a: alpha_value(a, 10) } } @doc false def new_hue(val) when is_binary(val) do case Integer.parse(val, 10) do {parsed, _} -> valid_hue_val(parsed) :error -> %Unit{value: 0, unit: nil} end end def new_hue(val) when is_integer(val) or is_float(val), do: valid_hue_val(val) @doc false def new_saturation(val) when is_binary(val) do case Integer.parse(val, 10) do {parsed, _} -> valid_saturation_val(parsed) :error -> %Unit{value: 0, unit: "%"} end end def new_saturation(val) when is_integer(val) or is_float(val), do: valid_saturation_val(val) @doc false def new_luminance(val) when is_binary(val) do case Integer.parse(val, 10) do {parsed, _} -> valid_luminance_val(parsed) :error -> %Unit{value: 0, unit: "%"} end end def new_luminance(val) when is_integer(val) or is_float(val), do: valid_luminance_val(val) @doc false def alpha_value(val, 10) do case Float.parse(val) do {parsed, _} -> valid_alpha_val(parsed) :error -> 1 end end @doc false def valid_hue_val(n) when n <= 360 and n >= 0, do: %Unit{value: n, unit: nil} def valid_hue_val(_n), do: %Unit{value: 0, unit: nil} @doc false def valid_saturation_val(n) when n <= 100 and n >= 0, do: %Unit{value: n, unit: "%"} def valid_saturation_val(_n), do: %Unit{value: 0, unit: "%"} @doc false def valid_luminance_val(n) when n <= 100 and n >= 0, do: %Unit{value: n, unit: "%"} def valid_luminance_val(_n), do: %Unit{value: 0, unit: "%"} @doc false def valid_alpha_val(n) when n > 0 and n <= 1, do: n def valid_alpha_val(_n), do: 1 end defimpl String.Chars, for: CSSEx.HSLA do def to_string(%CSSEx.HSLA{ h: %CSSEx.Unit{value: h}, s: %CSSEx.Unit{value: s} = su, l: %CSSEx.Unit{value: l} = lu, a: a }), do: "hsla(#{round(h)},#{%{su | value: round(s)}},#{%{lu | value: round(l)}},#{a})" end
lib/structs/hsla.ex
0.870308
0.453746
hsla.ex
starcoder
defmodule OMG.Watcher.ExitProcessor.Canonicity do @moduledoc """ Encapsulates managing and executing the behaviors related to treating exits by the child chain and watchers Keeps a state of exits that are in progress, updates it with news from the root chain, compares to the state of the ledger (`OMG.Watcher.State`), issues notifications as it finds suitable. Should manage all kinds of exits allowed in the protocol and handle the interactions between them. This is the functional, zero-side-effect part of the exit processor. Logic should go here: - orchestrating the persistence of the state - finding invalid exits, disseminating them as events according to rules - enabling to challenge invalid exits - figuring out critical failure of invalid exit challenging (aka `:unchallenged_exit` event) - MoreVP protocol managing in general For the imperative shell, see `OMG.Watcher.ExitProcessor` """ alias OMG.Watcher.Block alias OMG.Watcher.Crypto alias OMG.Watcher.Event alias OMG.Watcher.ExitProcessor alias OMG.Watcher.ExitProcessor.Core alias OMG.Watcher.ExitProcessor.DoubleSpend alias OMG.Watcher.ExitProcessor.InFlightExitInfo alias OMG.Watcher.ExitProcessor.KnownTx alias OMG.Watcher.State.Transaction alias OMG.Watcher.Utxo import OMG.Watcher.ExitProcessor.Tools require Utxo require Logger @type competitor_data_t :: %{ input_txbytes: binary(), input_utxo_pos: Utxo.Position.t(), in_flight_txbytes: binary(), in_flight_input_index: non_neg_integer(), competing_txbytes: binary(), competing_input_index: non_neg_integer(), competing_sig: Crypto.sig_t(), competing_tx_pos: nil | Utxo.Position.t(), competing_proof: binary() } @type prove_canonical_data_t :: %{ in_flight_txbytes: binary(), in_flight_tx_pos: Utxo.Position.t(), in_flight_proof: binary() } @doc """ Returns a tuple with byzantine events: first element is a list of events for ifes with competitor and the second is the same list filtered for late ifes past sla margin """ @spec get_ife_txs_with_competitors(Core.t(), KnownTx.known_txs_by_input_t(), pos_integer()) :: {list(Event.NonCanonicalIFE.t()), list(Event.UnchallengedNonCanonicalIFE.t())} def get_ife_txs_with_competitors(state, known_txs_by_input, eth_height_now) do non_canonical_ifes = state.in_flight_exits |> Map.values() |> Stream.map(fn ife -> {ife, DoubleSpend.find_competitor(known_txs_by_input, ife.tx)} end) |> Stream.filter(fn {_ife, maybe_competitor} -> !is_nil(maybe_competitor) end) |> Stream.filter(fn {ife, %DoubleSpend{known_tx: %KnownTx{utxo_pos: utxo_pos}}} -> InFlightExitInfo.is_viable_competitor?(ife, utxo_pos) end) non_canonical_ife_events = non_canonical_ifes |> Stream.map(fn {ife, _double_spend} -> Transaction.raw_txbytes(ife.tx) end) |> Enum.uniq() |> Enum.map(fn txbytes -> %Event.NonCanonicalIFE{txbytes: txbytes} end) past_sla_margin = fn {ife, _double_spend} -> ife.eth_height + state.sla_margin <= eth_height_now end late_non_canonical_ife_events = non_canonical_ifes |> Stream.filter(past_sla_margin) |> Stream.map(fn {ife, _double_spend} -> Transaction.raw_txbytes(ife.tx) end) |> Enum.uniq() |> Enum.map(fn txbytes -> %Event.UnchallengedNonCanonicalIFE{txbytes: txbytes} end) {non_canonical_ife_events, late_non_canonical_ife_events} end @doc """ Returns byzantine events for open IFEs that were challenged with an invalid challenge """ @spec get_invalid_ife_challenges(Core.t()) :: list(Event.InvalidIFEChallenge.t()) def get_invalid_ife_challenges(%Core{in_flight_exits: ifes}) do ifes |> Map.values() |> Stream.filter(&InFlightExitInfo.is_invalidly_challenged?/1) |> Stream.map(&Transaction.raw_txbytes(&1.tx)) |> Enum.uniq() |> Enum.map(fn txbytes -> %Event.InvalidIFEChallenge{txbytes: txbytes} end) end @doc """ Gets the root chain contract-required set of data to challenge a non-canonical ife """ @spec get_competitor_for_ife(ExitProcessor.Request.t(), Core.t(), binary()) :: {:ok, competitor_data_t()} | {:error, :competitor_not_found} | {:error, :ife_not_known_for_tx} | {:error, :no_viable_competitor_found} | {:error, Transaction.decode_error()} def get_competitor_for_ife( %ExitProcessor.Request{blocks_result: blocks}, %Core{} = state, ife_txbytes ) do known_txs_by_input = KnownTx.get_all_from_blocks_appendix(blocks, state) # find its competitor and use it to prepare the requested data with {:ok, ife_tx} <- Transaction.decode(ife_txbytes), {:ok, ife} <- get_ife(ife_tx, state.in_flight_exits), {:ok, double_spend} <- get_competitor(known_txs_by_input, ife.tx), %DoubleSpend{known_tx: %KnownTx{utxo_pos: utxo_pos}} = double_spend, true <- check_viable_competitor(ife, utxo_pos), do: {:ok, prepare_competitor_response(double_spend, ife, blocks)} end @doc """ Gets the root chain contract-required set of data to challenge an ife appearing as non-canonical in the root chain contract but which is known to be canonical locally because included in one of the blocks """ @spec prove_canonical_for_ife(Core.t(), binary()) :: {:ok, prove_canonical_data_t()} | {:error, :no_viable_canonical_proof_found} def prove_canonical_for_ife(%Core{} = state, ife_txbytes) do with {:ok, raw_ife_tx} <- Transaction.decode(ife_txbytes), {:ok, ife} <- get_ife(raw_ife_tx, state.in_flight_exits), true <- check_is_invalidly_challenged(ife), do: {:ok, prepare_canonical_response(ife)} end defp prepare_competitor_response( %DoubleSpend{ index: in_flight_input_index, known_spent_index: competing_input_index, known_tx: %KnownTx{signed_tx: known_signed_tx, utxo_pos: known_tx_utxo_pos} }, %InFlightExitInfo{tx: signed_ife_tx} = ife, blocks ) do {:ok, input_witnesses} = Transaction.Signed.get_witnesses(signed_ife_tx) owner = input_witnesses[in_flight_input_index] %{ input_tx: Enum.at(ife.input_txs, in_flight_input_index), input_utxo_pos: Enum.at(ife.input_utxos_pos, in_flight_input_index), in_flight_txbytes: signed_ife_tx |> Transaction.raw_txbytes(), in_flight_input_index: in_flight_input_index, competing_txbytes: known_signed_tx |> Transaction.raw_txbytes(), competing_input_index: competing_input_index, competing_sig: find_sig!(known_signed_tx, owner), competing_tx_pos: known_tx_utxo_pos || Utxo.position(0, 0, 0), competing_proof: maybe_calculate_proof(known_tx_utxo_pos, blocks) } end defp prepare_canonical_response(%InFlightExitInfo{tx: tx, tx_seen_in_blocks_at: {pos, proof}}), do: %{in_flight_txbytes: Transaction.raw_txbytes(tx), in_flight_tx_pos: pos, in_flight_proof: proof} defp maybe_calculate_proof(nil, _), do: <<>> defp maybe_calculate_proof(Utxo.position(blknum, txindex, _), blocks) do blocks |> Enum.find(fn %Block{number: number} -> blknum == number end) |> Block.inclusion_proof(txindex) end defp get_competitor(known_txs_by_input, signed_ife_tx) do known_txs_by_input |> DoubleSpend.find_competitor(signed_ife_tx) |> case do nil -> {:error, :competitor_not_found} value -> {:ok, value} end end defp check_viable_competitor(ife, utxo_pos), do: if(InFlightExitInfo.is_viable_competitor?(ife, utxo_pos), do: true, else: {:error, :no_viable_competitor_found}) defp check_is_invalidly_challenged(ife), do: if(InFlightExitInfo.is_invalidly_challenged?(ife), do: true, else: {:error, :no_viable_canonical_proof_found}) end
apps/omg_watcher/lib/omg_watcher/exit_processor/canonicity.ex
0.810966
0.541712
canonicity.ex
starcoder
defmodule Kayrock.Compression do @moduledoc """ Handles compression/decompression of messages. NOTE this is a copy of KafkaEx.Compression: https://github.com/kafkaex/kafka_ex/blob/master/lib/kafka_ex/compression.ex It is duplicated here to avoid creating a circular dependency. See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Compression To add new compression types: 1. Add the appropriate dependency to mix.exs (don't forget to add it to the application list). 2. Add the appropriate attribute value and compression_type atom. 3. Add a decompress function clause. 4. Add a compress function clause. """ @gzip_attribute 1 @snappy_attribute 2 @type attribute_t :: integer @type compression_type_t :: :snappy | :gzip @doc """ This function should pattern match on the attribute value and return the decompressed data. """ @spec decompress(attribute_t, binary) :: binary def decompress(@gzip_attribute, <<window_size::8-signed, _::bits>> = data) do z = :zlib.open() :zlib.inflateInit(z, window_size) [v | _] = :zlib.inflate(z, data) v end def decompress(@snappy_attribute, data) do case data do <<130, "SNAPPY", 0, _snappy_version_info::64, rest::binary>> -> snappy_decompress_chunk(rest, <<>>) _ -> {:ok, decompressed_value} = :snappy.decompress(data) decompressed_value end end @doc """ This function should pattern match on the compression_type atom and return the compressed data as well as the corresponding attribute value. """ @spec compress(compression_type_t, iodata) :: {binary, attribute_t} def compress(:snappy, data) do {:ok, compressed_data} = :snappy.compress(data) {compressed_data, @snappy_attribute} end def compress(:gzip, data) do compressed_data = :zlib.gzip(data) {compressed_data, @gzip_attribute} end def snappy_decompress_chunk(<<>>, so_far) do so_far end def snappy_decompress_chunk(<<0::32-signed, _rest::bits>>, so_far) do so_far end def snappy_decompress_chunk( <<valsize::32-unsigned, value::size(valsize)-binary, rest::bits>>, so_far ) do {:ok, decompressed_value} = :snappy.decompress(value) snappy_decompress_chunk(rest, so_far <> decompressed_value) end end
lib/kayrock/compression.ex
0.837387
0.631665
compression.ex
starcoder
defmodule MeshxRpc.Client.Pool do require Logger alias MeshxRpc.App.{C, T} alias MeshxRpc.Common.{Options, Structs.Data} @error_prefix :error_rpc @error_prefix_remote :error_rpc_remote @request_retries_statem 5 @opts [ idle_reconnect: [ type: :timeout, default: 600_000, doc: """ after RPC client-server connection is established or after RPC request processing, client worker enters `idle` state waiting for the next user request. `:idle_reconnect` specifies amount of idle time after which client should reestablish connection. One can think about this feature as high level TCP keep-alive/heartbeat action. """ ], retry_idle_error: [ type: :pos_integer, default: 1_000, doc: """ amount of time RPC client worker should wait before reconnecting after connection failure when in idle state. """ ], retry_hsk_fail: [ type: :pos_integer, default: 5_000, doc: """ amount of time RPC client worker should wait before reconnecting after handshake failure. Most common handshake failure reason probably will be inconsistent client/server configuration, for example different `:shared_key` option on client and server. """ ], retry_proxy_fail: [ type: :pos_integer, default: 1_000, doc: """ amount of time RPC client worker should wait before retrying reconnect to `:address` after initial socket connection failure. If `:address` points to mesh upstream endpoint proxy address, failures here can be associated with proxy binary problem. """ ], timeout_connect: [ type: :timeout, default: 5_000, doc: """ timeout used when establishing initial TCP socket connection with RPC server. """ ], exec_retry_on_error: [ type: {:list, :atom}, default: [:closed, :tcp_closed], doc: """ list of request processing errors on which request execution should be retried. """ ] ] @moduledoc """ RPC client workers pool. ## Configuration RPC client pool is configured with `opts` argument in `child_spec/2` function. Configuration options common to both RPC client and server are described in `MeshxRpc` "Common configuration" section. Configuration options specific to RPC client `opts` argument in `child_spec/2`: #{NimbleOptions.docs(@opts)} Values for options prefixed with `:retry-` and `:idle_reconnect` are randomized by `+/-10%`. Unit for time related options is millisecond. """ @worker_mod MeshxRpc.Client.Worker @doc """ Returns a specification to start a RPC client workers pool under a supervisor. `id` is a pool id which might be a name of a module implementing user RPC functions. `opts` are options described in "Configuration" section above and in `MeshxRpc` "Common configuration" section. Example: ```elixir iex(1)> MeshxRpc.Client.Pool.child_spec(Example1.Client, address: {:uds, "/tmp/meshx.sock"}) {Example1.Client, {:poolboy, :start_link, [ [ name: {:local, Example1.Client}, worker_module: MeshxRpc.Client.Worker ], [ ... ] ]}, :permanent, 5000, :worker, [:poolboy]} ``` """ @spec child_spec(id :: atom() | String.t(), opts :: Keyword.t()) :: :supervisor.child_spec() def child_spec(id, opts \\ []) do opts = NimbleOptions.validate!(opts, @opts ++ Options.common()) pool_opts = Keyword.fetch!(opts, :pool_opts) |> Keyword.put_new(:worker_module, @worker_mod) |> Keyword.put_new(:name, {:local, id}) data = Data.init(id, opts) |> Map.put(:idle_reconnect, Keyword.fetch!(opts, :idle_reconnect)) |> Map.put(:retry_idle_error, Keyword.fetch!(opts, :retry_idle_error)) |> Map.put(:retry_hsk_fail, Keyword.fetch!(opts, :retry_hsk_fail)) |> Map.put(:retry_proxy_fail, Keyword.fetch!(opts, :retry_proxy_fail)) |> Map.put(:timeout_connect, Keyword.fetch!(opts, :timeout_connect)) node_ref_mfa = Keyword.fetch!(opts, :node_ref_mfa) svc_ref_mfa = Keyword.get(opts, :svc_ref_mfa, id |> to_string() |> String.slice(0..255)) conn_ref_mfa = Keyword.fetch!(opts, :conn_ref_mfa) gen_statem_opts = Keyword.fetch!(opts, :gen_statem_opts) retry_on_error = Keyword.fetch!(opts, :exec_retry_on_error) :persistent_term.put({C.lib(), :retry_on_error}, retry_on_error) {id, start, restart, shutdown, type, modules} = :poolboy.child_spec(id, pool_opts, [{data, node_ref_mfa, svc_ref_mfa, conn_ref_mfa}, gen_statem_opts]) %{id: id, start: start, restart: restart, shutdown: shutdown, type: type, modules: modules} end @doc """ Sends an asynchronous RPC cast `request` using workers `pool`. Function always immediately returns `:ok`, even if `pool` doesn't exist or any other error takes place. `args`, `timeout`, `retry` and `retry_sleep` function arguments have the same meaning as in case of `call/6`. Example: ```elixir iex(1)> MeshxRpc.Client.Pool.cast(NotExisting, :undefined, []) :ok ``` """ @spec cast( pool :: atom(), request :: atom(), args :: list(), timeout :: timeout, retry :: pos_integer(), retry_sleep :: non_neg_integer() ) :: :ok def cast(pool, request, args, timeout \\ :infinity, retry \\ 5, retry_sleep \\ 100) do spawn(__MODULE__, :retry_request, [pool, :cast, request, args, timeout, retry, retry_sleep]) :ok end @doc """ Makes a synchronous RPC call `request` using workers `pool` and waits for reply. If successful, `call/6` returns `Kernel.apply(RpcServerMod, request, args)` evaluation result on remote server. If error occurs during request processing `{:error_rpc, reason}` is returned. User defined RPC server functions should not return results as tuples with first tuple element being `:error_rpc` or any atom name starting with `:error_rpc` (for example `:error_rpc_remote`) as those tuples are reserved by `MeshxRpc` internally for error reporting and processing. Possible request processing errors: * `:full` - all client pool workers are busy and pool manager cannot checkout new worker, * `:killed` - process executing request on remote server was killed because function execution time exceeded allowed timeout (see `MeshxRpc.Server.Pool` option `:timeout_execute`), * `:invalid_cks` - checksum check with user provided checksum function (`:cks_mfa` option) failed, * `:timeout_cks` - checksum calculation timeout, * `:closed` - client worker received user request before handshake with server was completed, * `:tcp_closed` - TCP socket connection was closed, * `:invalid_ref` - request message failed referential integrity check, * `{:undef, [...]}` - request function not defined on server, * `:invalid_state` - server or client worker encountered inconsistent critical state, * any `:inet` [POSIX Error Codes](http://erlang.org/doc/man/inet.html#posix-error-codes), * any errors from (de)serialization function. If remote server does not respond within time specified by `timeout` argument, process executing RPC call is killed, the function call fails and the caller exits. Additionally connection to remote server is closed which kills corresponding RPC call process being executed on the server. Please be aware of `MeshxRpc.Server.Pool` `:timeout_execute` configuration option playing similar role to `timeout` function argument on the server side. If error occurs during request processing and error reason is in list defined by `:exec_retry_on_error` configuration option, request will be retried `retry` times with randomized exponential back-off starting with `retry_sleep` msec. Example: ```elixir iex(1)> MeshxRpc.Client.Pool.call(Example1.Client, :echo, "hello world") "hello world" iex(2)> MeshxRpc.Client.Pool.call(Example1.Client, :not_existing, "hello world") {:error_rpc, {:undef, [...]}} """ @spec call( pool :: atom(), request :: atom(), args :: list(), timeout :: timeout, retry :: pos_integer(), retry_sleep :: non_neg_integer() ) :: term() | {:error_rpc, reason :: term()} def call(pool, request, args, timeout \\ :infinity, retry \\ 5, retry_sleep \\ 100) do case retry_request(pool, :call, request, args, timeout, retry, retry_sleep) do {@error_prefix_remote, e} -> {@error_prefix, e} r -> r end end @doc """ Same as `call/6`, will reraise remote exception locally. Example: ```elixir iex(1)> MeshxRpc.Client.Pool.call(Example1.Client, :raise_test, "raise kaboom!") {:error_rpc, %RuntimeError{message: "raise kaboom!"}} iex(2)> MeshxRpc.Client.Pool.call!(Example1.Client, :raise_test, "raise kaboom!") ** (RuntimeError) raise kaboom! ``` """ @spec call!( pool :: atom(), request :: atom(), args :: list(), timeout :: timeout, retry :: pos_integer(), retry_sleep :: non_neg_integer() ) :: term() | {:error_rpc, reason :: term()} def call!(pool, request, args, timeout \\ :infinity, retry \\ 5, retry_sleep \\ 100) do case retry_request(pool, :call, request, args, timeout, retry, retry_sleep) do {@error_prefix_remote, e} when is_exception(e) -> raise(e) {@error_prefix_remote, e} -> {@error_prefix, e} r -> r end end def retry_request(pool, req_type, request, args, timeout, retry, retry_sleep, retries \\ 0) when req_type in [:cast, :call] do case request(pool, req_type, request, args, timeout) do {err, e} when err in [@error_prefix, @error_prefix_remote] -> retry_on_error = :persistent_term.get({MeshxRpc.App.C.lib(), :retry_on_error}) if e in retry_on_error and retries < retry do retry_sleep |> T.rand_retry() |> Process.sleep() retry_request(pool, req_type, request, args, timeout, retry, retry_sleep * 2, retries + 1) else {err, e} end r -> r end end defp request(pool, req_type, request, args, timeout, retries_statem \\ 0) do case :poolboy.checkout(pool, false) do pid when is_pid(pid) -> ref = if timeout == :infinity do nil else {:ok, ref} = :timer.kill_after(timeout, pid) ref end result = try do :gen_statem.call(pid, {:request, {req_type, request, args}}) catch :exit, e -> if retries_statem < @request_retries_statem, do: request(pool, request, args, timeout, retries_statem + 1), else: exit(e) else r -> r end if !is_nil(ref), do: {:ok, :cancel} = :timer.cancel(ref) :poolboy.checkin(pool, pid) result :full -> {@error_prefix, :full} end end end
lib/client/pool.ex
0.837587
0.554651
pool.ex
starcoder
defmodule Stripe.Entity do @moduledoc """ A behaviour implemented by modules which represent Stripe objects. Intended for internal use within the library. A Stripe Entity is just a struct, optionally containing some logic for transforming a raw result from the Stripe API into a final struct. This is achieved through the use of the `from_json/2` macro. The list of objects which are recognised by the library upon receipt are currently static and contained in `Stripe.Converter`. When a map containing the `"object"` key is received from the API (even when nested inside another map), and the value of that field (for example, `"foo_widget"`) is in the list of supported objects, the converter will expect `Stripe.FooWidget` to be present and to implement this behaviour. To implement this behaviour, simply add `use Stripe.Entity` to the top of the entity module and make sure it defines a struct. This will also enable the use of the `from_json/2` macro, which allows for changes to the data received from Stripe before it is converted to a struct. """ @doc false # Not to be directly implemented, use the `from_json/2` macro instead @callback __from_json__(data :: map) :: map @doc false defmacro __using__(_opts) do quote do require Stripe.Entity import Stripe.Entity, only: [from_json: 2] @behaviour Stripe.Entity def __from_json__(data), do: data defoverridable __from_json__: 1 end end @doc """ Specifies logic that transforms data from Stripe to our Stripe object. The Stripe API docs specify that: > JSON is returned by all API responses, including errors, although our API > libraries convert responses to appropriate language-specific objects. To this end, sometimes it is desirable to make changes to the raw data received from the Stripe API, to aid its conversion into an appropriate Elixir data struct. One example is the convention of converting `"enum"` values (for example `"status"` values of `"succeeded"` or `"failed"`) into atoms instead of keeping them as strings. This macro is used in modules implementing the `Stripe.Entity` behaviour in order to specify this extra logic. Its use is optional, and the default is no transformation; i.e. the received JSON keys are merely converted to atoms and cast to the struct defined by the module. The macro is used like this: ``` from_json data do data |> cast_to_atom([:type, :status]) |> cast_each(:fee_details, &cast_to_atom(&1, :type)) end ``` It takes a parameter name to which the data received from Stripe is bound, and a `do` block which should return the transformed data. The transformation receives the JSON response from Stripe, with all keys converted to atoms (apart from keys inside a metadata map, which remain binaries) and should return a map which is ready to be cast to the struct the module defines. The helper `cast_*` functions defined in this module are automatically imported into the scope of this macro. The helper functions are all `nil`/missing key-safe, meaning that they will not magically add fields or error on fields which are missing or unset. You should therefore write your transformation assuming all possible data is actually present. """ defmacro from_json(param, do: block) do quote do def __from_json__(unquote(param)) do import Stripe.Entity, except: [from_json: 2] unquote(block) end end end @doc """ Cast the value of the given key or keys to an atom. Provide either a single atom key or a list of atom keys whose values should be converted from binaries to atoms. Used commonly to convert `"enum"` values (values which belong to a predefined set) in Stripe responses, for example a `:status` field. If a key is not set or the value is `nil`, no transformation occurs. """ @spec cast_to_atom(map, atom | [atom]) :: map def cast_to_atom(%{} = data, keys) when is_list(keys) do Enum.reduce(keys, data, fn key, data -> cast_to_atom(data, key) end) end def cast_to_atom(%{} = data, key) do key = List.wrap(key) maybe_update_in(data, key, maybe(&String.to_atom/1)) end @doc """ Applies the given function over a list present in the data. Provide either a single atom key or a list of atom keys whose values are lists. Each element of such a list will be mapped using the function passed. For example, if there is a field `:fee_details` which is a list of maps, each containing a `:type` key whose value we want to cast to an atom, then we write: ``` data |> cast_each(:fee_details, &cast_to_atom(&1, :type)) ``` If a key is not set or the value is `nil`, no transformation occurs. """ @spec cast_each(map, atom | [atom], (any -> any)) :: map def cast_each(%{} = data, keys, fun) when is_list(keys) and is_function(fun) do Enum.reduce(keys, data, fn key, data -> cast_each(data, key, fun) end) end def cast_each(%{} = data, key, fun) when is_function(fun) do key = List.wrap(key) maybe_update_in(data, key, maybe(&Enum.map(&1, fun))) end @doc """ Applies the given function to a field accessed via the path provided. Provide a path (identical to that used by the `Access` protocol) to the field to be modified, and a function to be applied to its value. For example, if the field `:fraud_details` contains a map whose keys are `:user_report` and `:stripe_report` we wish to convert to atoms, then we write: ``` data |> cast_path([:fraud_details], &cast_to_atom(&1, [:user_report, :stripe_report])) ``` Unlike `Kernel.update_in/2`, if the path to the key does not exist, or if the value is `nil`, then no transformation occurs. """ @spec cast_path(map, [atom], (any -> any)) :: map def cast_path(%{} = data, path, fun) when is_function(fun) do maybe_update_in(data, path, fun) end defp maybe(fun) do fn nil -> nil arg -> fun.(arg) end end defp maybe_update_in(data, path, fun) do case get_in(data, path) do nil -> data val -> put_in(data, path, fun.(val)) end end end
lib/stripe/entity.ex
0.930205
0.89607
entity.ex
starcoder
defmodule JSON.LD.LoadingDocumentFailedError do @moduledoc """ The document could not be loaded or parsed as JSON. """ defexception code: "loading document failed", message: nil end defmodule JSON.LD.ListOfListsError do @moduledoc """ A list of lists was detected. List of lists are not supported in this version of JSON-LD due to the algorithmic complexity. """ defexception code: "list of lists", message: nil end defmodule JSON.LD.InvalidIndexValueError do @moduledoc """ An @index member was encountered whose value was not a string. """ defexception code: "invalid @index value", message: nil end defmodule JSON.LD.ConflictingIndexesError do @moduledoc """ Multiple conflicting indexes have been found for the same node. """ defexception code: "conflicting indexes", message: nil end defmodule JSON.LD.InvalidIdValueError do @moduledoc """ An @id member was encountered whose value was not a string. """ defexception code: "invalid @id value", message: nil end defmodule JSON.LD.InvalidLocalContextError do @moduledoc """ An invalid local context was detected. """ defexception code: "invalid local context", message: nil end defmodule JSON.LD.MultipleContextLinkHeadersError do @moduledoc """ Multiple HTTP Link Headers [RFC5988] using the http://www.w3.org/ns/json-ld#context link relation have been detected. """ defexception code: "multiple context link headers", message: nil end defmodule JSON.LD.LoadingRemoteContextFailedError do @moduledoc """ There was a problem encountered loading a remote context. """ defexception code: "loading remote context failed", message: nil end defmodule JSON.LD.InvalidRemoteContextError do @moduledoc """ No valid context document has been found for a referenced, remote context. """ defexception code: "invalid remote context", message: nil end defmodule JSON.LD.RecursiveContextInclusionError do @moduledoc """ A cycle in remote context inclusions has been detected. """ defexception code: "recursive context inclusion", message: nil end defmodule JSON.LD.InvalidBaseIRIError do @moduledoc """ An invalid base IRI has been detected, i.e., it is neither an absolute IRI nor null. """ defexception code: "invalid base IRI", message: nil end defmodule JSON.LD.InvalidVocabMappingError do @moduledoc """ An invalid vocabulary mapping has been detected, i.e., it is neither an absolute IRI nor null. """ defexception code: "invalid vocab mapping", message: nil end defmodule JSON.LD.InvalidDefaultLanguageError do @moduledoc """ The value of the default language is not a string or null and thus invalid. """ defexception code: "invalid default language", message: nil end defmodule JSON.LD.KeywordRedefinitionError do @moduledoc """ A keyword redefinition has been detected. """ defexception code: "keyword redefinition", message: nil end defmodule JSON.LD.InvalidTermDefinitionError do @moduledoc """ An invalid term definition has been detected. """ defexception code: "invalid term definition", message: nil end defmodule JSON.LD.InvalidReversePropertyError do @moduledoc """ An invalid reverse property definition has been detected. """ defexception code: "invalid reverse property", message: nil end defmodule JSON.LD.InvalidIRIMappingError do @moduledoc """ A local context contains a term that has an invalid or missing IRI mapping.. """ defexception code: "invalid IRI mapping", message: nil end defmodule JSON.LD.CyclicIRIMappingError do @moduledoc """ A cycle in IRI mappings has been detected. """ defexception code: "cyclic IRI mapping", message: nil end defmodule JSON.LD.InvalidKeywordAliasError do @moduledoc """ An invalid keyword alias definition has been encountered. """ defexception code: "invalid keyword alias", message: nil end defmodule JSON.LD.InvalidTypeMappingError do @moduledoc """ An @type member in a term definition was encountered whose value could not be expanded to an absolute IRI. """ defexception code: "invalid type mapping", message: nil end defmodule JSON.LD.InvalidLanguageMappingError do @moduledoc """ An @language member in a term definition was encountered whose value was neither a string nor null and thus invalid. """ defexception code: "invalid language mapping", message: nil end defmodule JSON.LD.CollidingKeywordsError do @moduledoc """ Two properties which expand to the same keyword have been detected. This might occur if a keyword and an alias thereof are used at the same time. """ defexception code: "colliding keywords", message: nil end defmodule JSON.LD.InvalidContainerMappingError do @moduledoc """ An @container member was encountered whose value was not one of the following strings: @list, @set, or @index. """ defexception code: "invalid container mapping", message: nil end defmodule JSON.LD.InvalidTypeValueError do @moduledoc """ An invalid value for an @type member has been detected, i.e., the value was neither a string nor an array of strings. """ defexception code: "invalid type value", message: nil end defmodule JSON.LD.InvalidValueObjectError do @moduledoc """ A value object with disallowed members has been detected. """ defexception code: "invalid value object", message: nil end defmodule JSON.LD.InvalidValueObjectValueError do @moduledoc """ An invalid value for the @value member of a value object has been detected, i.e., it is neither a scalar nor null. """ defexception code: "invalid value object value", message: nil end defmodule JSON.LD.InvalidLanguageTaggedStringError do @moduledoc """ A language-tagged string with an invalid language value was detected. """ defexception code: "invalid language-tagged string", message: nil end defmodule JSON.LD.InvalidLanguageTaggedValueError do @moduledoc """ A number, true, or false with an associated language tag was detected. """ defexception code: "invalid language-tagged value", message: nil end defmodule JSON.LD.InvalidTypedValueError do @moduledoc """ A typed value with an invalid type was detected. """ defexception code: "invalid typed value", message: nil end defmodule JSON.LD.InvalidSetOrListObjectError do @moduledoc """ A set object or list object with disallowed members has been detected. """ defexception code: "invalid set or list object", message: nil end defmodule JSON.LD.InvalidLanguageMapValueError do @moduledoc """ An invalid value in a language map has been detected. It has to be a string or an array of strings. """ defexception code: "invalid language map value", message: nil end defmodule JSON.LD.CompactionToListOfListsError do @moduledoc """ The compacted document contains a list of lists as multiple lists have been compacted to the same term. """ defexception code: "compaction to list of lists", message: nil end defmodule JSON.LD.InvalidReversePropertyMapError do @moduledoc """ CollidingKeywordsError """ defexception code: "invalid reverse property map", message: nil end defmodule JSON.LD.InvalidReverseValueError do @moduledoc """ An invalid value for an @reverse member has been detected, i.e., the value was not a JSON object. """ defexception code: "invalid @reverse value", message: nil end defmodule JSON.LD.InvalidReversePropertyValueError do @moduledoc """ An invalid value for a reverse property has been detected. The value of an inverse property must be a node object. """ defexception code: "invalid reverse property value", message: nil end
lib/json/ld/exceptions.ex
0.841988
0.404096
exceptions.ex
starcoder
defmodule MongoEx do defguard is_field(literal) when is_atom(literal) defguard is_value(literal) when not is_atom(literal) def dumpers(_primitive, type), do: [type] def all(query, params) do normalized = %{} sources = [] normalized |> from(query) |> filter(query, sources, params) |> projection(query) end defp from(normalized, query) do %{from: %{source: {schema_name, _schema_module}}} = query normalized |> Map.put(:schema, schema_name) |> normalize_hints(query) end defp normalize_hints(normalized, %{from: %{hints: hints}} = _query) when length(hints) > 0 do Map.put(normalized, :hint, list_to_projection(hints)) end defp normalize_hints(normalized, _query), do: normalized defp filter(normalized, query, sources, params) do Map.put(normalized, :query, wheres(query, sources, params)) end @where_ops %{ and: "$and", or: "$or" } defp wheres(%{wheres: wheres} = query, sources, params) when length(wheres) > 1 do tuples = Enum.map(wheres, &expr(&1, sources, params, query)) |> Enum.zip(wheres) acc_wheres(tuples) end defp wheres(%{wheres: [where_expr]} = query, sources, params), do: expr(where_expr, sources, params, query) defp wheres(%{wheres: []}, _sources, _params), do: %{} defp acc_wheres([{expr, %{op: curr_op}} | tail]), do: acc_wheres(tail, curr_op, [expr]) defp acc_wheres([{expr, %{op: curr_op}} | tail], prev_op, acc_exprs) when prev_op == curr_op do acc_wheres(tail, curr_op, acc_exprs ++ [expr]) end defp acc_wheres([{expr, %{op: curr_op}} | tail], prev_op, acc_exprs) when prev_op != curr_op and length(acc_exprs) > 1 do acc_wheres( tail, curr_op, [ %{@where_ops[prev_op] => acc_exprs}, expr ] ) end defp acc_wheres([{expr, %{op: curr_op}} | tail], prev_op, [prev_expr]) when prev_op != curr_op do acc_wheres( tail, curr_op, [prev_expr] ++ [expr] ) end defp acc_wheres([], prev_op, acc_expr) do %{@where_ops[prev_op] => acc_expr} end defp projection(normalized, query) do %{select: %{fields: fields}} = query Map.put(normalized, :projection, select_fields(fields, query)) end defp select_fields([], _query) do %{} end defp select_fields(fields, _query) do Enum.map(fields, fn {{:., _, [{:&, [], [_idx]}, field_name]}, [], []} -> {normalize_field(field_name), 1} end) |> Map.new() end defp list_to_projection(list) do Enum.reduce(list, %{}, fn item, acc -> Map.put(acc, item, 1) end) end defp expr(%Ecto.Query.BooleanExpr{expr: expr} = _where, sources, params, query) do expr(expr, sources, params, query) end defp expr({:or, _, args}, sources, params, query) do [left, right] = args left = expr(left, sources, params, query) right = expr(right, sources, params, query) %{"$or" => [left, right]} end defp expr({:and, _, args}, sources, params, query) do [left, right] = args left = expr(left, sources, params, query) right = expr(right, sources, params, query) %{"$and" => [left, right]} end binary_ops = [==: "$eq", !=: "$ne", <=: "$lte", >=: "$gte", <: "$lt", >: "$gt"] @binary_ops Keyword.keys(binary_ops) Enum.map(binary_ops, fn {op, str} -> defp expr({unquote(op), _, args}, sources, params, query) when unquote(op) in @binary_ops do normalize_comparison(unquote(str), args, sources, params, query) end end) defp expr({:^, _, [param_idx]}, _sources, params, _query) do Enum.at(params, param_idx) end defp expr({{:., _, [{:&, _, [_idx]}, field]}, _, []}, _sources, _params, _query) when is_atom(field) do field end defp expr({:{}, _, exprs}, sources, params, query) do Enum.map(exprs, &expr(&1, sources, params, query)) end defp expr(exprs, sources, params, query) when is_list(exprs) do Enum.map(exprs, &expr(&1, sources, params, query)) end defp expr(literal, _sources, _params, _query) when is_binary(literal), do: literal defp expr(literal, _sources, _params, _query) when is_integer(literal), do: literal defp expr(literal, _sources, _params, _query) when is_float(literal), do: literal defp normalize_comparison(op, [left, right] = _args, sources, params, query) do left = expr(left, sources, params, query) right = expr(right, sources, params, query) pairs = get_field_value_pairs(op, left, right) Enum.reduce(pairs, %{}, fn pair, acc -> [field: field, op: op, value: value] = pair Map.put(acc, normalize_field(field), %{op => value}) end) end defp get_field_value_pairs(op, left_expr, right_expr) when is_list(left_expr) and is_list(right_expr) do pairs = Enum.zip(left_expr, right_expr) Enum.flat_map(pairs, fn {left, right} -> get_field_value_pairs(op, left, right) end) end defp get_field_value_pairs(op, left_literal, right_literal) when is_field(left_literal) and is_value(right_literal) do [[field: left_literal, op: op, value: right_literal]] end @tuple_inverse_binary_ops %{ "$eq" => "$eq", "$ne" => "$ne", "$lte" => "$gt", "$gte" => "$lt", "$lt" => "$gte", "$gt" => "$lte" } defp get_field_value_pairs(op, left_literal, right_literal) when is_value(left_literal) and is_field(right_literal) do inverse_op = Map.get(@tuple_inverse_binary_ops, op) [[field: right_literal, op: inverse_op, value: left_literal]] end defp normalize_field(field) when is_atom(field), do: Atom.to_string(field) end
lib/mongo_ex.ex
0.614163
0.501465
mongo_ex.ex
starcoder
defmodule Network do @moduledoc""" Create a simple neural network to train across relatively simple tasks. """ @doc""" The primary function of a neural net is to learn to produce the right output from a given input (learning). The train call receives the scape and size and creates accordingly. ex: Network.train(:rubix, :small) """ def train(scape, size) do Network.create(:ffnn, scape, size) end @doc""" default creation - calls Network.create(:ffnn, :xor, :medium) """ def create do create(:ffnn, :xor, :medium) end @doc""" """ def create(type, scape, size) do [c] = Cortex.generate(scape, type) neurons = Neuron.generate(size) #Empty neurons sensors = Interactor.generate(scape, :sensor) |> Enum.map(fn x -> Interactor.fanout_neurons(x, neurons) end) actuators = Interactor.generate(scape, :actuator) |> Enum.map(fn x -> Interactor.fanin_neurons(x, neurons) end) neurons_plus = Neuron.assign_inputs_outputs_and_weights(neurons, sensors, actuators) # Neurons assigned input_neurons, output_neurons, and weights |> Enum.map(fn x -> %{x | cortex_id: c.id} end) # Neurons given cx_id sensors_plus = Enum.map(sensors, fn x -> %{x | cortex_id: c.id} end) # Sensors given cx_id actuators_plus = Enum.map(actuators, fn x -> %{x | cortex_id: c.id, output_pids: c.id} end) # Actuators given cx_id genotype = [neurons_plus, sensors_plus, actuators_plus, [c]] genotype end def link_and_process(genotype) do [neurons, sensors, actuators, [cortex]] = genotype table = :ets.new(:table, [:set, :private]) gen_pids(genotype, table) # Neurons, sensors, actuators spawned, pids send to :ets table. Fetched in the next line [neurons_plus_pids, sensors_plus_pids, actuators_plus_pids] = for x <- [neurons, sensors, actuators], do: Enum.map(x, fn y -> %{y | pid: :ets.lookup_element(table, y.id, 2)} end) cortex_running = %{cortex | pid: spawn(Cortex, :run, [[neurons_plus_pids, sensors_plus_pids, actuators_plus_pids, cortex], self(), []])} neurons_plus_outs = Enum.map(neurons_plus_pids, fn x -> assign_output_pids(x, table) end) sensors_plus_outs = Enum.map(sensors_plus_pids, fn x -> assign_output_pids(x, table) end) :ets.delete(table) actuators_plus_outs = Enum.map(actuators_plus_pids, fn x -> %{x | output_pids: cortex_running.pid} end) Enum.each(neurons_plus_outs, fn x -> send x.pid, {:update_pids, x.output_pids, cortex_running.pid} end) Enum.each(sensors_plus_outs, fn x -> send x.pid, {:update_pids_sensor, x.output_pids, cortex_running.pid} end) Enum.each(actuators_plus_outs, fn x -> send x.pid, {:update_pids_actuator, cortex_running.pid} end) send cortex_running.pid, {:start, Scape.get_count(cortex_running.scape)} receive do {:nn_error, error, training_size} -> [{:error, error}, {:training_size, training_size}] end end def assign_output_pids(unit, table) do case unit.id do {:neuron, _} -> %{unit | output_pids: Enum.map(unit.output_neurons, fn x -> :ets.lookup_element(table, x, 2) end)} {:sensor, _} -> %{unit | output_pids: Enum.map(unit.fanout_ids, fn x -> :ets.lookup_element(table, x, 2) end)} end end @doc""" Receives phenotype and "activates" all nodes. """ def gen_pids(genotype, table) do [neurons, sensors, actuators, [cortex]] = genotype n_list = set_pids(neurons, genotype) s_list = set_pids(sensors, genotype) a_list = set_pids(actuators, genotype) full_list = List.flatten([n_list, s_list, a_list]) Enum.each(full_list, fn x -> :ets.insert(table, x) end) end @doc""" Creates a list of tuples - {node_id, pid} """ def set_pids(list, genotype) do [head | tail] = list case head.id do {:actuator, id} -> Enum.map(list, fn x -> {x.id, spawn(Interactor, :run, [:actuator, genotype, []])} end) {:sensor, id} -> Enum.map(list, fn x -> {x.id, spawn(Interactor, :run, [:sensor, genotype, []])} end) {:neuron, id} -> Enum.map(list, fn x -> {x.id, spawn(Neuron, :run, [x, []])} end) end end end
lib/network.ex
0.742422
0.766119
network.ex
starcoder
defmodule Theater.Demo.Order do @moduledoc """ A more complex demonstration Actor. This is a simple actor for demonstrating how implementing an Actor works. It represents an order and goes through several stages like a state machine. It can also interact with other Actors. Items can be added to an order and when payment is received the order is closed and ready for shipping. Once an order is shipped it is done and we close it. The cutomer name can be set at any point, and can be copied to another order. When Actors get a new message they will process it with `process/3`. They will be passed the current state of the Actor, the ID that was used to reach this Actor, and the message being passed to it. The Actor is responsible for processing any messages sent to it and returning a value indicating its new state and whether to persist it. See `Theater.Actor` for further documentation. """ use Theater.Actor @doc """ Create a new order. Orders start with no items and no customer name, but then process messages as normal. """ def init(id, {:get, pid}) do send(pid, {:order, id, :no_such_order}) :stop end def init(id, message) do process({:open, [], nil}, id, message) end @doc """ Process a message for a Counter. Sending `{:add, item}` will add `item` to the order. Sending `:pay` will stop accepting items and mark it ready to be shipped if it is open. Sending `:ship` will close out the order if it is ready to be shipped. Sending `{:set_name, name}` will set the customer name to `name`. Sending `{:copy_name_to, id}` will copy the customer name from this order to order `id`. Sending `{:get, pid}` will send a message to pid of the form `{:order, id, items, name}`. """ def process({_state, items, name}, id, {:get, pid}) do send(pid, {:order, id, items, name}) :no_update end def process({state, items, _}, _id, {:set_name, name}) do {:ok, {state, items, name}} end def process({_state, _items, name}, _id, {:copy_name_to, id}) do Theater.send(__MODULE__, id, {:set_name, name}) :no_update end def process({:open, items, name}, _id, {:add, item}) do {:ok, {:open, [item | items], name}} end def process({:open, items, name}, _id, :pay) do {:ok, {:ready_to_ship, items, name}} end def process({:ready_to_ship, _items, _name}, _id, :ship) do :stop end end
lib/theater/demo/order.ex
0.747432
0.592961
order.ex
starcoder