defmodule Snitch.Data.Model.TaxCategory do
@moduledoc """
Model functions for Tax Category.
"""
use Snitch.Data.Model
import Ecto.Changeset
alias Ecto.Multi
alias Snitch.Core.Tools.MultiTenancy.MultiQuery
alias Snitch.Data.Schema.TaxCategory
@doc """
Creates a TaxCategory in the db with the supplied `params`.
To create, the following fields can be provided in the params
map:
| field | type |
| --------- | ------ |
| name | string |
| tax_code | string |
| description | string |
| is_default? | boolean |
> Note: the `:name` field in the params is a `required` field.
> If `:is_default?` field is set to `true` then the current tax_category
is set as default and any previous tax category is unset from default.
## Example
params = %{
name: "Value Added Tax",
tax_code: "EU_VAT",
description: "value added tax"
}
{:ok, tax_category} = Snitch.Data.Model.TaxCategory.create(params)
"""
@spec create(map) ::
{:ok, TaxCategory.t()}
| {:error, Ecto.Changeset.t()}
def create(params) do
changeset = TaxCategory.create_changeset(%TaxCategory{}, params)
case fetch_change(changeset, :is_default?) do
{:ok, true} ->
clear_default_multi()
|> Multi.run(:tax_category, fn _ ->
QH.create(TaxCategory, params, Repo)
end)
|> persist()
_ ->
QH.create(TaxCategory, params, Repo)
end
end
@doc """
Updates a tax category as per the supplied fields in params.
The following fields are updatable:
| field | type |
| --------- | ------ |
| name | string |
| tax_code | string |
| description | string |
| is_default? | boolean |
## Note
If the `:name` field is passed in `params` then it shouldn't be
empty.
If `:is_default?` field is set to `true` then the current `tax_category`
is set as default and any previous tax category is unset from default.
## Example
create_params = %{
name: "Value Added Tax",
tax_code: "EU_VAT",
description: "value added tax"
}
{:ok, tax_category} = Snitch.Data.Model.TaxCategory.create(create_params)
update_params = %{
name: "Value Added Tax",
tax_code: "EU_VAT",
description: "value added tax"
}
{:ok, tax_category} =
Snitch.Data.Model.TaxCategory.update(update_params, tax_category)
"""
@spec update(map, TaxCategory.t()) ::
{:ok, TaxCategory.t()}
| {:error, Ecto.Changeset.t()}
def update(params, instance \\ nil) do
with true <- Map.has_key?(params, :is_default?),
true <- params.is_default? do
clear_default_multi()
|> Multi.run(:tax_category, fn _ ->
QH.update(TaxCategory, params, instance, Repo)
end)
|> persist()
else
_ ->
QH.update(TaxCategory, params, instance, Repo)
end
end
@doc """
Returns a TaxCategory.
Takes as input an `id` and an `active` flag.
When `active` is false, a TaxCategory will be returned even
if it is _soft deleted_.
> Note: by default, only a tax category that is present in the table
and is __not soft deleted__ is returned.
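## Example
A sketch, assuming a tax category with id `1` exists:
    tax_category = Snitch.Data.Model.TaxCategory.get(1)
    even_if_deleted = Snitch.Data.Model.TaxCategory.get(1, false)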
"""
@spec get(integer, boolean) :: TaxCategory.t() | nil
def get(id, active \\ true) do
if active do
query = from(tc in TaxCategory, where: is_nil(tc.deleted_at) and tc.id == ^id)
Repo.one(query)
else
QH.get(TaxCategory, id, Repo)
end
end
@doc """
Returns a `list` of available tax categories.
Takes an `active` field. When `active` is false, will
return all the tax_categories, including those which are
_soft deleted_.
> Note: by default, or when `active` is set to true, the function
returns only those tax categories which are not soft deleted.
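## Example
A sketch:
    active_categories = Snitch.Data.Model.TaxCategory.get_all()
    all_categories = Snitch.Data.Model.TaxCategory.get_all(false)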
"""
@spec get_all(boolean) :: [TaxCategory.t()]
def get_all(active \\ true) do
if active do
query = from(tc in TaxCategory, where: is_nil(tc.deleted_at))
Repo.all(query)
else
Repo.all(TaxCategory)
end
end
@doc """
Soft deletes a TaxCategory passed to the function.
Takes as input the `instance` of the TaxCategory to be deleted.
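## Example
A sketch; either a struct or an id may be passed:
    {:ok, deleted} = Snitch.Data.Model.TaxCategory.delete(tax_category)
    {:ok, deleted} = Snitch.Data.Model.TaxCategory.delete(tax_category.id)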
"""
@spec delete(TaxCategory.t() | integer) ::
{:ok, TaxCategory.t()}
| {:error, Ecto.Changeset.t()}
def delete(id) when is_integer(id) do
params = %{deleted_at: DateTime.utc_now(), id: id}
QH.update(TaxCategory, params, Repo)
end
def delete(instance) do
params = %{deleted_at: DateTime.utc_now()}
QH.update(TaxCategory, params, instance, Repo)
end
defp clear_default_multi do
query = from(tc in TaxCategory, where: tc.is_default? == true)
MultiQuery.update_all(Multi.new(), :is_default, query, set: [is_default?: false])
end
defp persist(multi) do
case Repo.transaction(multi) do
{:ok, %{tax_category: tax_category}} ->
{:ok, tax_category}
{:error, _, _, _} = error ->
error
end
end
end

# File: apps/snitch_core/lib/core/data/model/tax_category.ex
defmodule Blockchain.Account do
@moduledoc """
Represents the state of an account, as defined in Section 4
of the Yellow Paper.
"""
alias MerklePatriciaTree.Trie
@empty_keccak BitHelper.kec(<<>>)
@empty_trie MerklePatriciaTree.Trie.empty_trie_root_hash()
# State defined in Section 4.1 of the Yellow Paper
defstruct nonce: 0, # σn
balance: 0, # σb
storage_root: @empty_trie, # σs
code_hash: @empty_keccak # σc
# Types defined as Eq.(12) of the Yellow Paper
@type t :: %__MODULE__{
nonce: integer(),
balance: EVM.Wei.t(),
storage_root: EVM.trie_root(),
code_hash: MerklePatriciaTree.Trie.key()
}
@doc """
Checks whether or not an account is a non-contract account. This is defined in the latter
part of Section 4.1 of the Yellow Paper.
## Examples
iex> Blockchain.Account.is_simple_account?(%Blockchain.Account{})
true
iex> Blockchain.Account.is_simple_account?(%Blockchain.Account{code_hash: <<0x01, 0x02>>})
false
iex> Blockchain.Account.is_simple_account?(%Blockchain.Account{code_hash: <<197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112>>})
true
"""
@spec is_simple_account?(t) :: boolean()
def is_simple_account?(acct) do
acct.code_hash == @empty_keccak
end
@doc """
Encodes an account such that it can be represented in RLP encoding.
This is defined as Eq.(10) `p` in the Yellow Paper.
## Examples
iex> Blockchain.Account.serialize(%Blockchain.Account{nonce: 5, balance: 10, storage_root: <<0x00, 0x01>>, code_hash: <<0x01, 0x02>>})
[5, 10, <<0x00, 0x01>>, <<0x01, 0x02>>]
iex> Blockchain.Account.serialize(%Blockchain.Account{})
[
0,
0,
<<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>,
<<197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112>>
]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(account) do
[
account.nonce,
account.balance,
account.storage_root,
account.code_hash
]
end
@doc """
Decodes an account from an RLP encodable structure.
This is defined as Eq.(10) `p` in the Yellow Paper (reversed).
## Examples
iex> Blockchain.Account.deserialize([<<5>>, <<10>>, <<0x00, 0x01>>, <<0x01, 0x02>>])
%Blockchain.Account{nonce: 5, balance: 10, storage_root: <<0x00, 0x01>>, code_hash: <<0x01, 0x02>>}
iex> Blockchain.Account.deserialize([<<0>>, <<0>>, <<86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33>>, <<197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112>>])
%Blockchain.Account{}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
[
nonce,
balance,
storage_root,
code_hash
] = rlp
%Blockchain.Account{
nonce: :binary.decode_unsigned(nonce),
balance: :binary.decode_unsigned(balance),
storage_root: storage_root,
code_hash: code_hash
}
end
@doc """
Loads an account from an address, as defined in Eq.(9), Eq.(10), Eq.(11)
and Eq.(12) of the Yellow Paper.
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> MerklePatriciaTree.Trie.update(<<0x01::160>> |> BitHelper.kec, ExRLP.encode([5, 6, <<1>>, <<2>>]))
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{nonce: 5, balance: 6, storage_root: <<0x01>>, code_hash: <<0x02>>}
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> MerklePatriciaTree.Trie.update(<<0x01::160>> |> BitHelper.kec, <<>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
"""
@spec get_account(EVM.state(), EVM.address()) :: t | nil
def get_account(state, address) do
case Trie.get(state, address |> BitHelper.kec()) do
nil ->
nil
# TODO: Is this the same as deleting the account?
<<>> ->
nil
encoded_account ->
encoded_account
|> ExRLP.decode()
|> deserialize()
end
end
@doc """
Helper function to load multiple accounts.
## Examples
iex> state = MerklePatriciaTree.Trie.update(MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db()), <<0x01::160>> |> BitHelper.kec, ExRLP.encode([5, 6, <<1>>, <<2>>]))
iex> Blockchain.Account.get_accounts(state, [<<0x01::160>>, <<0x02::160>>])
[
%Blockchain.Account{nonce: 5, balance: 6, storage_root: <<0x01>>, code_hash: <<0x02>>},
nil
]
"""
@spec get_accounts(EVM.state(), [EVM.address()]) :: [t | nil]
def get_accounts(state, addresses) do
for address <- addresses, do: get_account(state, address)
end
@doc """
Stores an account at a given address. This function handles serializing
the account, encoding it to RLP and placing into the given state trie.
## Examples
iex> state = Blockchain.Account.put_account(MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db()), <<0x01::160>>, %Blockchain.Account{nonce: 5, balance: 6, storage_root: <<0x01>>, code_hash: <<0x02>>})
iex> MerklePatriciaTree.Trie.get(state, <<0x01::160>> |> BitHelper.kec) |> ExRLP.decode
[<<5>>, <<6>>, <<0x01>>, <<0x02>>]
"""
@spec put_account(EVM.state(), EVM.address(), t) :: EVM.state()
def put_account(state, address, account) do
encoded_account =
account
|> serialize()
|> ExRLP.encode()
Trie.update(state, address |> BitHelper.kec(), encoded_account)
end
@doc """
Completely removes an account from the world state. This is used,
for instance, after a suicide. This is defined from Eq.(77) and
Eq.(78) in the Yellow Paper.
# TODO: Should we delete the storage root trie?
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.del_account(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.del_account(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
nil
"""
@spec del_account(EVM.state(), EVM.address()) :: EVM.state()
def del_account(state, address) do
Trie.update(state, address |> BitHelper.kec(), <<>>)
end
@doc """
Gets and updates an account based on a given input
function `fun`. Account passed to `fun` will be blank
instead of nil if account doesn't exist.
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.update_account(<<0x01::160>>, fn (acc) -> %{acc | balance: acc.balance + 5} end)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 15}
iex> {_state, before_acct, after_acct} = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.update_account(<<0x01::160>>, fn (acc) -> %{acc | balance: acc.balance + 5} end, true)
iex> before_acct.balance
10
iex> after_acct.balance
15
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.update_account(<<0x01::160>>, fn (acc) -> %{acc | nonce: acc.nonce + 1} end)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{nonce: 1}
"""
@spec update_account(EVM.state(), EVM.address(), (t -> t), boolean()) ::
EVM.state() | {EVM.state(), t, t}
def update_account(state, address, fun, return_accounts \\ false) do
account = get_account(state, address) || %__MODULE__{}
updated_account = fun.(account)
updated_state = put_account(state, address, updated_account)
if return_accounts do
{updated_state, account, updated_account}
else
updated_state
end
end
@doc """
Simple helper function to increment a nonce value.
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{nonce: 10})
iex> state
...> |> Blockchain.Account.increment_nonce(<<0x01::160>>)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{nonce: 11}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{nonce: 10})
iex> { _state, before_acct, after_acct } = Blockchain.Account.increment_nonce(state, <<0x01::160>>, true)
iex> before_acct.nonce
10
iex> after_acct.nonce
11
"""
@spec increment_nonce(EVM.state(), EVM.address(), boolean()) ::
EVM.state() | {EVM.state(), t, t}
def increment_nonce(state, address, return_accounts \\ false) do
update_account(
state,
address,
fn acct ->
%{acct | nonce: acct.nonce + 1}
end,
return_accounts
)
end
@doc """
Simple helper function to adjust wei in an account. Wei may be
positive (to add wei) or negative (to remove it). This function
will raise if we attempt to reduce wei in an account to less than zero.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.add_wei(<<0x01::160>>, 13)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 23}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.add_wei(<<0x01::160>>, -3)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 7}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.add_wei(<<0x01::160>>, -13)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
** (RuntimeError) wei reduced to less than zero
"""
@spec add_wei(EVM.state(), EVM.address(), EVM.Wei.t()) :: EVM.state()
def add_wei(state, address, delta_wei) do
update_account(state, address, fn acct ->
updated_balance = acct.balance + delta_wei
if updated_balance < 0, do: raise("wei reduced to less than zero")
%{acct | balance: updated_balance}
end)
end
@doc """
Even simpler helper function to adjust wei in an account negatively. Wei
may be positive (to subtract wei) or negative (to add it). This function
will raise if we attempt to reduce wei in an account to less than zero.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.dec_wei(<<0x01::160>>, 3)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 7}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.dec_wei(<<0x01::160>>, 13)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
** (RuntimeError) wei reduced to less than zero
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> state
...> |> Blockchain.Account.dec_wei(<<0x01::160>>, -3)
...> |> Blockchain.Account.get_account(<<0x01::160>>)
%Blockchain.Account{balance: 13}
"""
@spec dec_wei(EVM.state(), EVM.address(), EVM.Wei.t()) :: EVM.state()
def dec_wei(state, address, delta_wei), do: add_wei(state, address, -1 * delta_wei)
@doc """
Helper function for transferring eth for one account to another.
This handles the fact that a new account may be shadow-created if
it receives eth. See Section 8, Eq.(100), Eq.(101), Eq.(102), Eq.(103),
and Eq.(104) of the Yellow Paper.
The Yellow Paper assumes this function will always succeed (as the checks
occur before this function is called), but we'll check just in case
this function is not properly called. The only case will be if the
sending account is nil or has an insufficient balance, but we add
a few extra checks just in case.
Note: transferring value to an empty account still adds value to said account,
even though it's effectively a zombie.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.put_account(<<0x02::160>>, %Blockchain.Account{balance: 5})
iex> {:ok, state} = Blockchain.Account.transfer(state, <<0x01::160>>, <<0x02::160>>, 3)
iex> {Blockchain.Account.get_account(state, <<0x01::160>>), Blockchain.Account.get_account(state, <<0x02::160>>)}
{%Blockchain.Account{balance: 7}, %Blockchain.Account{balance: 8}}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> {:ok, state} = Blockchain.Account.transfer(state, <<0x01::160>>, <<0x02::160>>, 3)
iex> {Blockchain.Account.get_account(state, <<0x01::160>>), Blockchain.Account.get_account(state, <<0x02::160>>)}
{%Blockchain.Account{balance: 7}, %Blockchain.Account{balance: 3}}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
iex> Blockchain.Account.transfer(state, <<0x01::160>>, <<0x02::160>>, 12)
{:error, "sender account insufficient wei"}
iex> Blockchain.Account.transfer(MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db()), <<0x01::160>>, <<0x02::160>>, -3)
{:error, "wei transfer cannot be negative"}
"""
@spec transfer(EVM.state(), EVM.address(), EVM.address(), EVM.Wei.t()) ::
{:ok, EVM.state()} | {:error, String.t()}
def transfer(state, from, to, wei) do
# TODO: Decide if we want to waste the cycles to pull
# the account information when `add_wei` will do that itself.
from_account = get_account(state, from)
cond do
wei < 0 ->
{:error, "wei transfer cannot be negative"}
from_account == nil ->
{:error, "sender account does not exist"}
from_account.balance < wei ->
{:error, "sender account insufficient wei"}
true ->
{:ok,
state
|> add_wei(from, -1 * wei)
|> add_wei(to, wei)}
end
end
@doc """
Performs transfer but raises instead of returning if an error occurs.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_account(<<0x01::160>>, %Blockchain.Account{balance: 10})
...> |> Blockchain.Account.put_account(<<0x02::160>>, %Blockchain.Account{balance: 5})
iex> state = Blockchain.Account.transfer!(state, <<0x01::160>>, <<0x02::160>>, 3)
iex> {Blockchain.Account.get_account(state, <<0x01::160>>), Blockchain.Account.get_account(state, <<0x02::160>>)}
{%Blockchain.Account{balance: 7}, %Blockchain.Account{balance: 8}}
"""
@spec transfer!(EVM.state(), EVM.address(), EVM.address(), EVM.Wei.t()) :: EVM.state()
def transfer!(state, from, to, wei) do
case transfer(state, from, to, wei) do
{:ok, state} -> state
{:error, reason} -> raise reason
end
end
@doc """
Puts code into a given account. Note, this will handle
the aspect that we need to store the code_hash outside of the
contract itself and only store the KEC of the code_hash.
This is defined in Eq.(98) and addressed in Section 4.1 under
`codeHash` in the Yellow Paper.
Not sure if this is correct, but I'm going to store the code_hash
in state, as well as the link to it in the Account object itself.
TODO: Verify the above ^^^ is accurate, as it's not spelled out
in the Yellow Paper directly.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_code(<<0x01::160>>, <<1, 2, 3>>)
iex> Blockchain.Account.get_account(state, <<0x01::160>>)
%Blockchain.Account{code_hash: <<241, 136, 94, 218, 84, 183, 160, 83, 49, 140, 212, 30,
32, 147, 34, 13, 171, 21, 214, 83, 129, 177, 21, 122, 54, 51, 168,
59, 253, 92, 146, 57>>}
iex> MerklePatriciaTree.DB.get(state.db, BitHelper.kec(<<1, 2, 3>>))
{:ok, <<1, 2, 3>>}
"""
@spec put_code(EVM.state(), EVM.address(), EVM.MachineCode.t()) :: EVM.state()
def put_code(state, contract_address, machine_code) do
kec = BitHelper.kec(machine_code)
MerklePatriciaTree.DB.put!(state.db, kec, machine_code)
state
|> update_account(contract_address, fn acct ->
%{acct | code_hash: kec}
end)
end
@doc """
Returns the machine code associated with the account at the given
address. For a simple account with no associated code, this
returns `{:ok, <<>>}`.
We may return `:not_found`, indicating that we were not able to
find the given code hash in the state trie.
Alternatively, we will return `{:ok, machine_code}` where `machine_code`
may be the empty string `<<>>`.
Note from Yellow Paper:
> "it is assumed that the client will have stored the pair (KEC(I_b), I_b)
at some point prior in order to make the determination of I_b feasible"
## Examples
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.get_machine_code(<<0x01::160>>)
{:ok, <<>>}
iex> MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
...> |> Blockchain.Account.put_code(<<0x01::160>>, <<1, 2, 3>>)
...> |> Blockchain.Account.get_machine_code(<<0x01::160>>)
{:ok, <<1, 2, 3>>}
"""
@spec get_machine_code(EVM.state(), EVM.address()) :: {:ok, binary()} | :not_found
def get_machine_code(state, contract_address) do
# TODO: Do we have a standard for default account values
account = get_account(state, contract_address) || %__MODULE__{}
case account.code_hash do
@empty_keccak ->
{:ok, <<>>}
code_hash ->
case MerklePatriciaTree.DB.get(state.db, code_hash) do
nil -> :not_found
{:ok, machine_code} when is_binary(machine_code) -> {:ok, machine_code}
end
end
end
@doc """
Stores a value in the storage root of an account. This
is defined in Section 4.1 under **storageRoot** in the
Yellow Paper.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> updated_state = Blockchain.Account.put_storage(state, <<01::160>>, 5, 9)
iex> Blockchain.Account.get_storage(updated_state, <<01::160>>, 5)
{:ok, 9}
"""
@spec put_storage(EVM.state(), EVM.address(), integer(), integer()) :: EVM.state()
def put_storage(state, address, key, value) do
update_account(state, address, fn acct ->
updated_storage_trie = storage_put(state.db, acct.storage_root, key, value)
%{acct | storage_root: updated_storage_trie.root_hash}
end)
end
@doc """
Gets a value from storage root of an account. See Section
4.1 under **storageRoot** from the Yellow Paper.
## Examples
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> updated_state = Blockchain.Account.put_storage(state, <<01::160>>, 5, 9)
iex> Blockchain.Account.get_storage(updated_state, <<01::160>>, 5)
{:ok, 9}
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> Blockchain.Account.get_storage(state, <<02::160>>, 5)
:account_not_found
iex> state = MerklePatriciaTree.Trie.new(MerklePatriciaTree.Test.random_ets_db())
iex> updated_state = Blockchain.Account.put_storage(state, <<01::160>>, 5, 9)
iex> Blockchain.Account.get_storage(updated_state, <<01::160>>, 55)
:key_not_found
"""
@spec get_storage(EVM.state(), EVM.address(), integer()) ::
{:ok, integer()} | :account_not_found | :key_not_found
def get_storage(state, address, key) do
case get_account(state, address) do
nil ->
:account_not_found
account ->
case storage_fetch(state.db, account.storage_root, key) do
nil -> :key_not_found
value -> {:ok, value |> :binary.decode_unsigned()}
end
end
end
@spec storage_put(MerklePatriciaTree.DB.db(), EVM.trie_root(), integer(), integer()) :: Trie.t()
defp storage_put(db, storage_root, key, value) do
Trie.new(db, storage_root)
|> Trie.update(
key |> :binary.encode_unsigned() |> BitHelper.pad(32) |> BitHelper.kec(),
value |> ExRLP.encode()
)
end
@spec storage_fetch(MerklePatriciaTree.DB.db(), EVM.trie_root(), integer()) :: binary() | nil
defp storage_fetch(db, storage_root, key) do
Trie.new(db, storage_root)
|> Trie.get(key |> :binary.encode_unsigned() |> BitHelper.pad(32) |> BitHelper.kec())
end
end

# File: lib/blockchain/account.ex
defmodule Vapor.Provider.Dotenv do
@moduledoc """
The dotenv config provider will look for a `.env` file and load all of
the values for that file. The values can be written like so:
```
DATABASE_URL=https://localhost:9432
PORT=4000
REDIS_HOST=1234
```
If the file can't be found then this provider will still return an ok but
will (obviously) not load any configuration values. The primary use case for
this provider is local development where it might be inconvenient to add all
of the necessary environment variables on your local machine and it makes
tradeoffs for that use case.
## Existing environment variables
By default the dotenv provider won't overwrite any existing environment variables.
You can change this by setting the `overwrite` key to `true`:
%Dotenv{overwrite: true}
## File hierarchy
If no file is specified then the dotenv provider will load these files in this
order. Each subsequent file is loaded over the previous. In these examples `ENV`
will be the current mix environment: `dev`, `test`, or `prod`.
* `.env`
* `.env.ENV`
* `.env.local`
* `.env.ENV.local`
You should commit `.env` and `.env.ENV` files to your project and ignore any
`.local` files. This allows users to provide a custom setup if they need to
do that.
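
## Usage sketch

A minimal, illustrative setup (only the provider struct defined in this
file is shown; how the provider list is loaded depends on the surrounding
Vapor API, and `.env.ci` is a hypothetical filename):

    providers = [
      %Vapor.Provider.Dotenv{},
      %Vapor.Provider.Dotenv{filename: ".env.ci", overwrite: true}
    ]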
"""
defstruct filename: nil, overwrite: false
defimpl Vapor.Provider do
def load(%{filename: nil, overwrite: overwrite}) do
# Get the environment from mix. If mix isn't available we assume we're in
# a prod release
env = if Code.ensure_loaded?(Mix), do: Mix.env(), else: "prod"
files = [".env", ".env.#{env}", ".env.local", ".env.#{env}.local"]
files
|> Enum.reduce(%{}, fn file, acc -> Map.merge(acc, load_file(file)) end)
|> put_vars(overwrite)
{:ok, %{}}
end
def load(%{filename: filename, overwrite: overwrite}) do
filename
|> load_file
|> put_vars(overwrite)
{:ok, %{}}
end
defp load_file(file) do
case File.read(file) do
{:ok, contents} ->
parse(contents)
_ ->
%{}
end
end
def put_vars(vars, overwrite) do
for {k, v} <- vars do
if overwrite || System.get_env(k) == nil do
System.put_env(k, v)
end
end
end
defp parse(contents) do
contents
|> String.split(~r/\n/, trim: true)
|> Enum.reject(&comment?/1)
|> Enum.map(fn pair -> String.split(pair, "=", parts: 2) end)
|> Enum.filter(&good_pair/1)
|> Enum.map(fn [key, value] -> {String.trim(key), String.trim(value)} end)
|> Enum.into(%{})
end
defp comment?(line) do
Regex.match?(~R/\A\s*#/, line)
end
defp good_pair(pair) do
case pair do
[key, value] ->
String.length(key) > 0 && String.length(value) > 0
_ ->
false
end
end
end
end

# File: lib/vapor/providers/dotenv.ex
defmodule Marshall do
@moduledoc """
Data marshalling helpers for extracting carrier profile fields (USDOT number,
MC number, addresses, fleet counts) from parsed HTML table rows.
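## Usage sketch
An illustrative call (assumes `main_rows` and `alt_rows` are Floki-style
parse trees of the two row groups on a carrier profile page):
    {:ok, profile} = Marshall.profile({main_rows, alt_rows})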
"""
def fetch(data) do
case(data) do
[{"td", [{"style", "white-space:nowrap"}], ["USDOT Number"]}, {"td", [], [":"]}, _] ->
fetch_dot(data)
[{"td", [{"style", "white-space:nowrap"}], ["MC #"]}, {"td", [], [":"]}, _] ->
fetch_mc(data)
[
{"td", [{"style", "white-space:nowrap"}, {"valign", "top"}], ["Address"]},
{"td", [], [":"]},
_
] ->
fetch_address(data)
[
{"td", [{"style", "white-space:nowrap"}, {"valign", "top"}], ["Telephone"]},
{"td", [], [":"]},
_
] ->
fetch_telephone(data)
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Tractors"]},
{"td", [{"width", "1%"}], [":"]},
_
] ->
fetch_tractors(data)
[
{"td", [{"style", "white-space:nowrap"}], ["Name"]},
{"td", [], [":"]},
_
] ->
fetch_name(data)
[
{"td", [{"style", "white-space:nowrap"}], ["Mailing Address"]},
{"td", [], [":"]},
_
] ->
fetch_mailing_address(data)
[
{"td", [{"style", "white-space:nowrap"}], ["Fax"]},
{"td", [], [":"]},
_
] ->
fetch_fax(data)
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Trucks"]},
{"td", [{"width", "1%"}], [":"]},
_
] ->
fetch_number_of_trucks(data)
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Trailers"]},
{"td", [{"width", "1%"}], [":"]},
_
] ->
fetch_number_of_trailers(data)
_ ->
nil
end
end
def fetch_dot(data) do
result =
case(data) do
[{"td", [{"style", "white-space:nowrap"}], ["USDOT Number"]}, {"td", [], [":"]}, _] ->
[{"td", [{"style", "white-space:nowrap"}], ["USDOT Number"]}, {"td", [], [":"]}, el] =
data
{"td", [{"nowrap", "nowrap"}, {"width", "98%"}], [selected]} = el
selected
_ ->
""
end
{"USDOT Number", result}
end
def fetch_name(data) do
result =
case(data) do
[
{"td", [{"style", "white-space:nowrap"}], ["Name"]},
{"td", [], [":"]},
_
] ->
[
{"td", [{"style", "white-space:nowrap"}], ["Name"]},
{"td", [], [":"]},
el
] = data
{"td", [{"style", "white-space:wrap"}, {"width", "98%"}], [selected]} = el
selected
_ ->
""
end
{"Name", result}
end
def fetch_mc(data) do
result =
case(data) do
[
{"td", [{"style", "white-space:nowrap"}], ["MC #"]},
{"td", [], [":"]},
_
] ->
[
{"td", [{"style", "white-space:nowrap"}], ["MC #"]},
{"td", [], [":"]},
el
] = data
{"td", [{"nowrap", "nowrap"}, {"width", "98%"}], [selected]} = el
selected
_ ->
""
end
{"MC", result}
end
def fetch_mailing_address(data) do
result =
case(data) do
[
{"td", [{"style", "white-space:nowrap"}], ["Mailing Address"]},
{"td", [], [":"]},
_
] ->
[
{"td", [{"style", "white-space:nowrap"}], ["Mailing Address"]},
{"td", [], [":"]},
el
] = data
{"td", _, address_list} = el
count = Enum.count(address_list)
case(count) do
# A parseable cell is a three-element list: [address, <br/>, city].
3 ->
[
address,
{"br", [], []},
city
] = address_list
address =
address
|> String.replace("\r", "")
|> String.replace("\n", "")
|> String.replace("\t", "")
|> String.trim()
city =
city
|> String.replace("\r", "")
|> String.replace("\n", "")
|> String.replace("\t", "")
|> String.trim()
{address, city}
_ ->
{"", ""}
end
end
{"Mailing Address", result}
end
def fetch_fax(data) do
result =
case(data) do
[
{"td", [{"style", "white-space:nowrap"}], ["Fax"]},
{"td", [], [":"]},
_
] ->
[
{"td", [{"style", "white-space:nowrap"}], ["Fax"]},
{"td", [], [":"]},
el
] = data
{"td", [{"width", "98%"}], [selected]} = el
selected
_ ->
""
end
{"Fax", result}
end
def fetch_number_of_trucks(data) do
result =
case(data) do
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Trucks"]},
{"td", [{"width", "1%"}], [":"]},
_
] ->
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Trucks"]},
{"td", [{"width", "1%"}], [":"]},
el
] = data
{"td", [], [selected]} = el
selected
_ ->
""
end
{"Number of Trucks", result}
end
def fetch_number_of_trailers(data) do
result =
case(data) do
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Trailers"]},
{"td", [{"width", "1%"}], [":"]},
_
] ->
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Trailers"]},
{"td", [{"width", "1%"}], [":"]},
el
] = data
{"td", [], [selected]} = el
selected
_ ->
""
end
{"Number of Trailers", result}
end
def fetch_address(data) do
result =
case(data) do
[
{"td", [{"style", "white-space:nowrap"}, {"valign", "top"}], ["Address"]},
{"td", [], [":"]},
_
] ->
[
{"td", [{"style", "white-space:nowrap"}, {"valign", "top"}], ["Address"]},
{"td", [], [":"]},
el
] = data
{"td", [{"width", "98%"}], address_list} = el
[
address,
{"br", [], []},
city
] = address_list
address =
address
|> String.replace("\r", "")
|> String.replace("\n", "")
|> String.replace("\t", "")
|> String.trim()
city =
city
|> String.replace("\r", "")
|> String.replace("\n", "")
|> String.replace("\t", "")
|> String.trim()
{address, city}
_ ->
""
end
{"Address", result}
end
def fetch_telephone(data) do
result =
case(data) do
[
{"td", [{"style", "white-space:nowrap"}, {"valign", "top"}], ["Telephone"]},
{"td", [], [":"]},
_
] ->
[
{"td", [{"style", "white-space:nowrap"}, {"valign", "top"}], ["Telephone"]},
{"td", [], [":"]},
el
] = data
{"td", [{"width", "98%"}], [selected]} = el
selected
_ ->
""
end
{"Telephone", result}
end
def fetch_tractors(data) do
## IO.inspect(data)
result =
case(data) do
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Tractors"]},
{"td", [{"width", "1%"}], [":"]},
_
] ->
[
{"td", [{"width", "1%"}, {"style", "white-space:nowrap"}], ["Number of Tractors"]},
{"td", [{"width", "1%"}], [":"]},
el
] = data
{"td", [], [selected]} = el
selected
_ ->
""
end
{"Number of Tractors", result}
end
def profile({:error, data}) do
{:error, data}
end
def profile(response) do
profile = %{
name: "",
phone: "",
fax: "",
mc: "",
dot: "",
mailing_address: "",
address: "",
number_of_trucks: "",
number_of_trailers: "",
number_of_tractors: ""
}
{main, alt} = response
main_data =
Enum.map(main, fn x ->
case(x) do
{"tr", [{"class", "MiddleTDFMCSA"}], _} ->
{"tr", [{"class", "MiddleTDFMCSA"}], data} = x
fetch(data)
_ ->
nil
end
end)
alt_data =
Enum.map(alt, fn x ->
case(x) do
{"tr", [{"class", "MiddleAltTDFMCSA"}], _} ->
{"tr", [{"class", "MiddleAltTDFMCSA"}], data} = x
fetch(data)
_ ->
nil
end
end)
main_data = Enum.reject(main_data, &is_nil/1)
alt_data = Enum.reject(alt_data, &is_nil/1)
[
{"USDOT Number", dot},
{"Address", address},
{"Telephone", phone},
{"Number of Tractors", tractors}
] = main_data
profile = Map.put(profile, :dot, dot)
profile = Map.put(profile, :address, address)
profile = Map.put(profile, :phone, phone)
profile = Map.put(profile, :number_of_tractors, tractors)
count = Enum.count(alt_data)
profile =
case(count) do
5 ->
[
{"Name", name},
{"Mailing Address", mailing_address},
{"Fax", fax},
{"Number of Trucks", trucks},
{"Number of Trailers", trailers}
] = alt_data
profile = Map.put(profile, :name, name)
profile = Map.put(profile, :mc, "")
profile = Map.put(profile, :mailing_address, mailing_address)
profile = Map.put(profile, :fax, fax)
profile = Map.put(profile, :number_of_trucks, trucks)
profile = Map.put(profile, :number_of_trailers, trailers)
profile
6 ->
[
{"Name", name},
{"MC", mc},
{"Mailing Address", mailing_address},
{"Fax", fax},
{"Number of Trucks", trucks},
{"Number of Trailers", trailers}
] = alt_data
profile = Map.put(profile, :name, name)
profile = Map.put(profile, :mc, mc)
profile = Map.put(profile, :mailing_address, mailing_address)
profile = Map.put(profile, :fax, fax)
profile = Map.put(profile, :number_of_trucks, trucks)
profile = Map.put(profile, :number_of_trailers, trailers)
profile
end
main_res =
case(Enum.count(main_data) > 0) do
true ->
{:ok, main_data}
false ->
{:error, "unable to extract content"}
end
alt_res =
case(Enum.count(alt_data) > 0) do
true ->
{:ok, alt_data}
false ->
{:error, "unable to extract content"}
end
{main_status, _} = main_res
{alt_status, _} = alt_res
error = main_status == :error and alt_status == :error
case(error) do
true ->
{:error, {main_res, alt_res}}
false ->
{:ok, profile}
end
end
def companies({:error, data}) do
{:error, data}
end
def companies(response) do
companies =
Enum.map(response, fn x ->
name =
case(x) do
{_, _, [{_, [{_, url}], [name]}, _]} ->
{name, url}
_ ->
{:error, "unable to extract page content"}
end
name
end)
case(companies) do
{:error, content} ->
{:error, content}
_ ->
c = Enum.reject(companies, &is_nil/1)
{:ok, c}
end
end
def company_names(response) do
companies =
Enum.map(response, fn x ->
name =
case(x) do
{_, _, [{_, [{_, url}], [name]}, _]} ->
{name, url}
_ ->
nil
end
name
end)
c = Enum.reject(companies, &is_nil/1)
{:ok, c}
end
def json(data) do
data
end
end

# File: lib/marshall.ex
defmodule ExSaga.Stepper do
@moduledoc """
Stepping behaviour for saga execution: successive `step/3` calls advance a
`Stepable` through its hooks, retries, error handling, and sublevel stages.
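
## Usage sketch

An illustrative (hypothetical) adopting module:

    defmodule MyStepper do
      use ExSaga.Stepper, compensation_event_name: [:starting, :compensation]

      @impl ExSaga.Stepper
      def handle_step(_stepable, _event, _opts), do: nil
    end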
"""
alias ExSaga.{ErrorHandler, Event, Hook, Retry, Stage, State, Stepable}
@doc """
"""
@callback handle_sublevel_step(stepable :: Stepable.t(), event :: Event.t(), opts :: Stepable.opts()) ::
Stepable.stage_result() | Stepable.step_result() | nil
@doc """
"""
@callback handle_step(stepable :: Stepable.t(), event :: Event.t(), opts :: Stepable.opts()) ::
Stepable.stage_result() | Stepable.step_result() | nil
@doc false
defmacro __using__(opts) do
quote do
@behaviour ExSaga.Stepper
alias ExSaga.{Event, Stage, Stepable, Stepper}
@compensation_event_name Keyword.get(unquote(opts), :compensation_event_name, [])
@doc """
"""
@spec compensation_event_name() :: Stage.full_name()
def compensation_event_name() do
@compensation_event_name
end
@doc """
"""
@spec step(Stepable.t(), Event.t(), Stepable.opts()) :: Stepable.stage_result() | Stepable.step_result()
def step(stepable, event, opts \\ []) do
Stepper.step(__MODULE__, stepable, event, opts)
end
@impl Stepper
def handle_sublevel_step(stepable, event, opts) do
step(%{stepable | state: %{stepable.state | sublevel?: false}}, event, opts)
end
defoverridable handle_sublevel_step: 3
end
end
@doc """
"""
@spec step(stepper :: module, Stepable.t(), Event.t(), Stepable.opts()) ::
Stepable.stage_result() | Stepable.step_result()
def step(stepper, stepable, event, opts \\ [])
def step(stepper, stepable, %Event{name: [_, :hook, _]} = event, opts) do
case Hook.step(event, stepable.state, opts) do
{nil, new_state} ->
case event.context do
{%Event{} = inner, _} ->
step(stepper, %{stepable | state: new_state}, inner, opts)
%Event{} = e ->
step(stepper, %{stepable | state: new_state}, e, opts)
_ ->
{:continue, nil, %{stepable | state: new_state}}
end
{%Event{name: nil} = e, new_state} ->
event_name = stepper.compensation_event_name()
{:continue, Event.update(e, name: event_name), %{stepable | state: new_state}}
{next_event, new_state} ->
{:continue, next_event, %{stepable | state: new_state}}
end
end
def step(stepper, %{state: %State{sublevel?: true, hooks_left: []}} = stepable, event, opts) do
case stepper.handle_sublevel_step(stepable, event, opts) do
nil -> unknown_step(stepable, event, opts)
otherwise -> otherwise
end
end
def step(_stepper, %{state: %State{hooks_left: []}} = stepable, %Event{name: [_, :retry, _]} = event, opts) do
case Retry.step(stepable.state, event, opts) do
{:continue, event, state} ->
{:continue, event, %{stepable | state: state}}
{:noretry, _event, state} ->
%{effects_so_far: effects_so_far, reason: reason} = state
{:error, reason, effects_so_far}
{:retry, event, state} ->
%{effects_so_far: effects_so_far} = state
event =
Event.update(event,
name: [:starting, :transaction],
context: effects_so_far
)
{:continue, event, %{stepable | state: state}}
end
end
def step(_stepper, %{state: %State{hooks_left: []}} = stepable, %Event{name: [_, :error_handler]} = event, opts) do
{:continue, event, state} = ErrorHandler.step(stepable.state, event, opts)
{:continue, event, %{stepable | state: state}}
end
def step(stepper, %{state: %State{hooks_left: []}} = stepable, event, opts) do
case stepper.handle_step(stepable, event, opts) do
nil -> unknown_step(stepable, event, opts)
otherwise -> otherwise
end
end
def step(_stepper, %{state: %State{hooks_left: [h | hs]}} = stepable, event, opts) do
stepable = %{stepable | state: %{stepable.state | hooks_left: hs}}
{:continue, Hook.maybe_execute_hook(h, event, stepable.state, opts), stepable}
end
@doc false
@spec unknown_step(Stepable.t(), Event.t(), Stepable.opts()) :: Stepable.step_result()
defp unknown_step(stepable, event, opts) do
reason = {:unknown_event, event}
%{state: %{effects_so_far: effects_so_far}} = stepable
event =
Event.update(event,
name: [:starting, :error_handler],
context: {reason, event, effects_so_far}
)
{:continue, event, %{stepable | state: %{stepable.state | hooks_left: Hook.merge_hooks(stepable.state, opts)}}}
end
end

# File: lib/ex_saga/stepper.ex
defmodule RDF.PrefixMap do
@moduledoc """
A mapping of prefix atoms to IRI namespaces.
`RDF.PrefixMap` implements the `Enumerable` protocol.
"""
alias RDF.IRI
@type prefix :: atom
@type namespace :: IRI.t()
@type coercible_prefix :: atom | String.t()
@type coercible_namespace :: RDF.Vocabulary.Namespace.t() | String.t() | IRI.t()
@type prefix_map :: %{prefix => namespace}
@type conflict_resolver ::
(coercible_prefix, coercible_namespace, coercible_namespace -> coercible_namespace)
| :ignore
| :overwrite
@type t :: %__MODULE__{
map: prefix_map
}
defstruct map: %{}
@doc """
Creates an empty `RDF.PrefixMap`.
"""
@spec new :: t
def new, do: %__MODULE__{}
@doc """
Creates a new `RDF.PrefixMap` with initial mappings.
The initial prefix mappings can be passed as keyword lists or maps.
The keys for the prefixes can be given as atoms or strings and will be normalized to atoms.
The namespaces can be given as `RDF.IRI`s or strings and will be normalized to `RDF.IRI`s.
"""
@spec new(t | map | keyword) :: t
def new(map)
def new(%__MODULE__{} = prefix_map), do: prefix_map
def new(map) when is_map(map) do
%__MODULE__{map: Map.new(map, &normalize/1)}
end
def new(map) when is_list(map) do
map |> Map.new() |> new()
end
defp normalize({prefix, namespace}) when is_atom(prefix),
do: {prefix, IRI.coerce_base(namespace)}
defp normalize({prefix, namespace}) when is_binary(prefix),
do: normalize({String.to_atom(prefix), namespace})
defp normalize({prefix, namespace}),
do:
raise(ArgumentError, "Invalid prefix mapping: #{inspect(prefix)} => #{inspect(namespace)}")
@doc """
Adds a prefix mapping to `prefix_map`.
Unless a mapping of `prefix` to a different namespace already exists,
an `:ok` tuple is returned, otherwise an `:error` tuple.
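## Examples
A sketch (not a doctest):
    {:ok, pm} = RDF.PrefixMap.new() |> RDF.PrefixMap.add(:ex, "http://example.com/")
    {:error, _} = RDF.PrefixMap.add(pm, :ex, "http://other.example.com/")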
"""
@spec add(t, coercible_prefix, coercible_namespace) :: {:ok, t} | {:error, String.t()}
def add(prefix_map, prefix, namespace)
def add(%__MODULE__{map: map}, prefix, %IRI{} = namespace) when is_atom(prefix) do
if conflicts?(map, prefix, namespace) do
{:error, "prefix #{inspect(prefix)} is already mapped to another namespace"}
else
{:ok, %__MODULE__{map: Map.put(map, prefix, namespace)}}
end
end
def add(%__MODULE__{} = prefix_map, prefix, namespace) do
with {prefix, namespace} = normalize({prefix, namespace}) do
add(prefix_map, prefix, namespace)
end
end
@doc """
Adds a prefix mapping to the given `RDF.PrefixMap` and raises an exception in error cases.
"""
@spec add!(t, coercible_prefix, coercible_namespace) :: t
def add!(prefix_map, prefix, namespace) do
with {:ok, new_prefix_map} <- add(prefix_map, prefix, namespace) do
new_prefix_map
else
{:error, error} -> raise error
end
end
@doc """
Adds a prefix mapping to `prefix_map` overwriting an existing mapping.
"""
@spec put(t, coercible_prefix, coercible_namespace) :: t
def put(prefix_map, prefix, namespace)
def put(%__MODULE__{map: map}, prefix, %IRI{} = namespace) when is_atom(prefix) do
%__MODULE__{map: Map.put(map, prefix, namespace)}
end
def put(%__MODULE__{} = prefix_map, prefix, namespace) do
with {prefix, namespace} = normalize({prefix, namespace}) do
put(prefix_map, prefix, namespace)
end
end
@doc """
Merges two `RDF.PrefixMap`s.
The second prefix map can also be given as any structure which can converted
to a `RDF.PrefixMap` via `new/1`.
If the prefix maps can be merged without conflicts, that is there are no
prefixes mapped to different namespaces an `:ok` tuple is returned.
Otherwise an `:error` tuple with the list of prefixes with conflicting
namespaces is returned.
See also `merge/3` which allows you to resolve conflicts with a function.
"""
@spec merge(t, t | map | keyword) :: {:ok, t} | {:error, [atom | String.t()]}
def merge(prefix_map1, prefix_map2)
def merge(%__MODULE__{map: map1}, %__MODULE__{map: map2}) do
with [] <- merge_conflicts(map1, map2) do
{:ok, %__MODULE__{map: Map.merge(map1, map2)}}
else
conflicts -> {:error, conflicts}
end
end
def merge(%__MODULE__{} = prefix_map, other_prefixes) do
merge(prefix_map, new(other_prefixes))
rescue
FunctionClauseError ->
raise ArgumentError, "#{inspect(other_prefixes)} is not convertible to a RDF.PrefixMap"
end
@doc """
Merges two `RDF.PrefixMap`s, resolving conflicts through the given `conflict_resolver` function.
The second prefix map can also be given as any structure which can converted
to a `RDF.PrefixMap` via `new/1`.
The given function will be invoked when there are conflicting mappings of
prefixes to different namespaces; its arguments are `prefix`, `namespace1`
(the namespace for the prefix in the first prefix map),
and `namespace2` (the namespace for the prefix in the second prefix map).
The value returned by the `conflict_resolver` function is used as the namespace
for the prefix in the resulting prefix map.
Non-`RDF.IRI` values will be tried to be converted to `RDF.IRI`s via
`RDF.IRI.new` implicitly.
The most common conflict resolution strategies can be chosen directly with
the following atoms:
- `:ignore`: keep the original namespace from `prefix_map1`
- `:overwrite`: use the other namespace from `prefix_map2`
If a conflict can't be resolved, the provided function can return `nil`.
This will result in an overall return of an `:error` tuple with the list of
prefixes for which the conflict couldn't be resolved.
If everything could be merged, an `:ok` tuple is returned.
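## Examples
A sketch (not a doctest):
    pm1 = RDF.PrefixMap.new(ex: "http://example.com/ns1/")
    pm2 = RDF.PrefixMap.new(ex: "http://example.com/ns2/")
    {:ok, merged} = RDF.PrefixMap.merge(pm1, pm2, :ignore)
    {:ok, merged} = RDF.PrefixMap.merge(pm1, pm2, fn _prefix, ns1, _ns2 -> ns1 end)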
"""
@spec merge(t, t | map | keyword, conflict_resolver | nil) ::
{:ok, t} | {:error, [atom | String.t()]}
def merge(prefix_map1, prefix_map2, conflict_resolver)
def merge(prefix_map1, prefix_map2, :ignore) do
merge(prefix_map1, prefix_map2, fn _, ns, _ -> ns end)
end
def merge(prefix_map1, prefix_map2, :overwrite) do
merge(prefix_map1, prefix_map2, fn _, _, ns -> ns end)
end
def merge(%__MODULE__{map: map1}, %__MODULE__{map: map2}, conflict_resolver)
when is_function(conflict_resolver) do
conflict_resolution = fn prefix, namespace1, namespace2 ->
case conflict_resolver.(prefix, namespace1, namespace2) do
nil -> :conflict
result -> IRI.new(result)
end
end
with resolved_merge = Map.merge(map1, map2, conflict_resolution),
[] <- resolved_merge_rest_conflicts(resolved_merge) do
{:ok, %__MODULE__{map: resolved_merge}}
else
conflicts -> {:error, conflicts}
end
end
def merge(%__MODULE__{} = prefix_map1, prefix_map2, conflict_resolver)
when is_function(conflict_resolver) do
merge(prefix_map1, new(prefix_map2), conflict_resolver)
end
def merge(prefix_map1, prefix_map2, nil), do: merge(prefix_map1, prefix_map2)
defp resolved_merge_rest_conflicts(map) do
Enum.reduce(map, [], fn
{prefix, :conflict}, conflicts -> [prefix | conflicts]
_, conflicts -> conflicts
end)
end
defp merge_conflicts(map1, map2) do
Enum.reduce(map1, [], fn {prefix, namespace}, conflicts ->
if conflicts?(map2, prefix, namespace) do
[prefix | conflicts]
else
conflicts
end
end)
end
defp conflicts?(map, prefix, namespace) do
(existing_namespace = Map.get(map, prefix)) && existing_namespace != namespace
end
@doc """
Merges two `RDF.PrefixMap`s and raises an exception in error cases.
See `merge/2` and `merge/3` for more information on merging prefix maps.
"""
@spec merge!(t, t | map | keyword, conflict_resolver | nil) :: t
def merge!(prefix_map1, prefix_map2, conflict_resolver \\ nil) do
with {:ok, new_prefix_map} <- merge(prefix_map1, prefix_map2, conflict_resolver) do
new_prefix_map
else
{:error, conflicts} ->
conflicts = conflicts |> Stream.map(&inspect/1) |> Enum.join(", ")
raise "conflicting prefix mappings: #{conflicts}"
end
end
@doc """
Deletes the prefix mapping for `prefix` from `prefix_map`.
If no mapping for `prefix` exists, `prefix_map` is returned unchanged.
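## Examples
A sketch (not a doctest):
    pm = RDF.PrefixMap.new(ex: "http://example.com/")
    RDF.PrefixMap.delete(pm, :ex)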
"""
@spec delete(t, coercible_prefix) :: t
def delete(prefix_map, prefix)
def delete(%__MODULE__{map: map}, prefix) when is_atom(prefix) do
%__MODULE__{map: Map.delete(map, prefix)}
end
def delete(prefix_map, prefix) when is_binary(prefix) do
delete(prefix_map, String.to_atom(prefix))
end
@doc """
Drops the given `prefixes` from `prefix_map`.
If `prefixes` contains prefixes that are not in `prefix_map`, they're simply ignored.
"""
@spec drop(t, [coercible_prefix]) :: t
def drop(prefix_map, prefixes)
def drop(%__MODULE__{map: map}, prefixes) do
%__MODULE__{
map:
Map.drop(
map,
Enum.map(prefixes, fn
prefix when is_binary(prefix) -> String.to_atom(prefix)
other -> other
end)
)
}
end
@doc """
Returns the namespace for the given `prefix` in `prefix_map`.
Returns `nil`, when the given `prefix` is not present in `prefix_map`.
"""
@spec namespace(t, coercible_prefix) :: namespace | nil
def namespace(prefix_map, prefix)
def namespace(%__MODULE__{map: map}, prefix) when is_atom(prefix) do
Map.get(map, prefix)
end
def namespace(prefix_map, prefix) when is_binary(prefix) do
namespace(prefix_map, String.to_atom(prefix))
end
@doc """
Returns the prefix for the given `namespace` in `prefix_map`.
Returns `nil`, when the given `namespace` is not present in `prefix_map`.
"""
@spec prefix(t, coercible_namespace) :: coercible_prefix | nil
def prefix(prefix_map, namespace)
def prefix(%__MODULE__{map: map}, %IRI{} = namespace) do
Enum.find_value(map, fn {prefix, ns} -> ns == namespace && prefix end)
end
def prefix(prefix_map, namespace) when is_binary(namespace) do
prefix(prefix_map, IRI.new(namespace))
end
@doc """
Returns whether the given prefix exists in the given `RDF.PrefixMap`.
"""
@spec has_prefix?(t, coercible_prefix) :: boolean
def has_prefix?(prefix_map, prefix)
def has_prefix?(%__MODULE__{map: map}, prefix) when is_atom(prefix) do
Map.has_key?(map, prefix)
end
def has_prefix?(prefix_map, prefix) when is_binary(prefix) do
has_prefix?(prefix_map, String.to_atom(prefix))
end
@doc """
Returns all prefixes from the given `RDF.PrefixMap`.
"""
@spec prefixes(t) :: [coercible_prefix]
def prefixes(%__MODULE__{map: map}) do
Map.keys(map)
end
@doc """
Returns all namespaces from the given `RDF.PrefixMap`.
"""
@spec namespaces(t) :: [coercible_namespace]
def namespaces(%__MODULE__{map: map}) do
Map.values(map)
end
@doc """
Converts an IRI into a prefixed name.
Returns `nil` when no prefix for the namespace of `iri` is defined in `prefix_map`.
## Examples
iex> RDF.PrefixMap.new(ex: "http://example.com/")
...> |> RDF.PrefixMap.prefixed_name(~I<http://example.com/Foo>)
"ex:Foo"
iex> RDF.PrefixMap.new(ex: "http://example.com/")
...> |> RDF.PrefixMap.prefixed_name("http://example.com/Foo")
"ex:Foo"
"""
@spec prefixed_name(t, IRI.t() | String.t()) :: String.t() | nil
def prefixed_name(prefix_map, iri)
def prefixed_name(%__MODULE__{} = prefix_map, %IRI{} = iri) do
prefixed_name(prefix_map, IRI.to_string(iri))
end
def prefixed_name(%__MODULE__{} = prefix_map, iri) when is_binary(iri) do
case prefix_name_pair(prefix_map, iri) do
{prefix, name} -> prefix <> ":" <> name
_ -> nil
end
end
@doc false
@spec prefix_name_pair(t, IRI.t() | String.t()) :: {String.t(), String.t()} | nil
def prefix_name_pair(prefix_map, iri)
def prefix_name_pair(%__MODULE__{} = prefix_map, %IRI{} = iri) do
prefix_name_pair(prefix_map, IRI.to_string(iri))
end
def prefix_name_pair(%__MODULE__{} = prefix_map, iri) when is_binary(iri) do
Enum.find_value(prefix_map, fn {prefix, namespace} ->
case String.trim_leading(iri, IRI.to_string(namespace)) do
^iri ->
nil
truncated_name ->
unless String.contains?(truncated_name, ~w[/ #]) do
{to_string(prefix), truncated_name}
end
end
end)
end
@doc """
Converts a prefixed name into an IRI.
Returns `nil` when the prefix in `prefixed_name` is not defined in `prefix_map`.
## Examples
iex> RDF.PrefixMap.new(ex: "http://example.com/")
...> |> RDF.PrefixMap.prefixed_name_to_iri("ex:Foo")
~I<http://example.com/Foo>
"""
@spec prefixed_name_to_iri(t, String.t()) :: IRI.t() | nil
def prefixed_name_to_iri(%__MODULE__{} = prefix_map, prefixed_name)
when is_binary(prefixed_name) do
case String.split(prefixed_name, ":", parts: 2) do
[prefix, name] ->
if ns = namespace(prefix_map, prefix) do
RDF.iri(ns.value <> name)
end
_ ->
nil
end
end
defimpl Enumerable do
def reduce(%RDF.PrefixMap{map: map}, acc, fun), do: Enumerable.reduce(map, acc, fun)
def member?(%RDF.PrefixMap{map: map}, mapping), do: Enumerable.member?(map, mapping)
def count(%RDF.PrefixMap{map: map}), do: Enumerable.count(map)
def slice(%RDF.PrefixMap{map: map}), do: Enumerable.slice(map)
end
defimpl Inspect do
import Inspect.Algebra
def inspect(prefix_map, opts) do
map = Map.to_list(prefix_map.map)
open = color("%RDF.PrefixMap{", :map, opts)
sep = color(",", :map, opts)
close = color("}", :map, opts)
container_doc(open, map, close, opts, &Inspect.List.keyword/2,
separator: sep,
break: :strict
)
end
end
end

# File: lib/rdf/prefix_map.ex
defmodule Kino.ETS do
@moduledoc """
A widget for interactively viewing an ETS table.
## Examples
tid = :ets.new(:users, [:set, :public])
Kino.ETS.new(tid)
Kino.ETS.new(:elixir_config)
"""
@doc false
use GenServer, restart: :temporary
alias Kino.Utils.Table
defstruct [:pid]
@type t :: %__MODULE__{pid: pid()}
@typedoc false
@type state :: %{
parent_monitor_ref: reference(),
tid: :ets.tid()
}
@doc """
Starts a widget process representing the given ETS table.
Note that private tables cannot be read by an arbitrary process,
so the given table must have either public or protected access.
"""
@spec new(:ets.tid()) :: t()
def new(tid) do
case :ets.info(tid, :protection) do
:private ->
raise ArgumentError,
"the given table must be either public or protected, but a private one was given"
:undefined ->
raise ArgumentError,
"the given table identifier #{inspect(tid)} does not refer to an existing ETS table"
_ ->
:ok
end
parent = self()
opts = [tid: tid, parent: parent]
{:ok, pid} = DynamicSupervisor.start_child(Kino.WidgetSupervisor, {__MODULE__, opts})
%__MODULE__{pid: pid}
end
# TODO: remove in v0.3.0
@deprecated "Use Kino.ETS.new/1 instead"
def start(tid), do: new(tid)
@doc false
def start_link(opts) do
GenServer.start_link(__MODULE__, opts)
end
@impl true
def init(opts) do
tid = Keyword.fetch!(opts, :tid)
parent = Keyword.fetch!(opts, :parent)
parent_monitor_ref = Process.monitor(parent)
{:ok, %{parent_monitor_ref: parent_monitor_ref, tid: tid}}
end
@impl true
def handle_info({:connect, pid}, state) do
table_name = :ets.info(state.tid, :name)
name = "ETS #{inspect(table_name)}"
columns =
case :ets.match_object(state.tid, :_, 1) do
{[record], _} -> Table.columns_for_records([record])
:"$end_of_table" -> []
end
send(
pid,
{:connect_reply, %{name: name, columns: columns, features: [:refetch, :pagination]}}
)
{:noreply, state}
end
def handle_info({:get_rows, pid, rows_spec}, state) do
records = get_records(state.tid, rows_spec)
rows = Enum.map(records, &record_to_row/1)
total_rows = :ets.info(state.tid, :size)
columns =
case records do
[] -> :initial
records -> Table.columns_for_records(records)
end
send(pid, {:rows, %{rows: rows, total_rows: total_rows, columns: columns}})
{:noreply, state}
end
def handle_info({:DOWN, ref, :process, _object, _reason}, %{parent_monitor_ref: ref} = state) do
{:stop, :shutdown, state}
end
defp get_records(tid, rows_spec) do
query = :ets.table(tid)
cursor = :qlc.cursor(query)
if rows_spec.offset > 0 do
:qlc.next_answers(cursor, rows_spec.offset)
end
records = :qlc.next_answers(cursor, rows_spec.limit)
:qlc.delete_cursor(cursor)
records
end
defp record_to_row(record) do
fields =
record
|> Tuple.to_list()
|> Enum.with_index()
|> Map.new(fn {val, idx} -> {idx, inspect(val)} end)
# Note: id is opaque to the client, and we don't need it for now
%{id: nil, fields: fields}
end
end

# File: lib/kino/ets.ex
defmodule Example_Registry do
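# Register an Agent under a {:via, Registry, ...} name and interact with it by name.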
def start do
{:ok, _} = Registry.start_link(keys: :unique, name: Registry.ViaTest)
name = {:via, Registry, {Registry.ViaTest, "agent"}}
{:ok, _} = Agent.start_link(fn -> 0 end, name: name)
Agent.get(name, & &1)
Agent.update(name, &(&1 + 3))
Agent.get(name, & &1)
end
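# Look up the {pid, value} entries currently registered under "agent".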
def start2 do
Registry.lookup(Registry.ViaTest, "agent")
end
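# Use a duplicate-key Registry as a local PubSub: register, then dispatch a broadcast.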
def start3 do
{:ok, _} =
Registry.start_link(
keys: :duplicate,
name: Registry.PubSubTest,
partitions: System.schedulers_online()
)
{:ok, _} = Registry.register(Registry.PubSubTest, "hello", [])
Registry.dispatch(Registry.PubSubTest, "hello", fn entries ->
for {pid, _} <- entries, do: send(pid, {:broadcast, "world"})
end)
end
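# Count the keys registered in a unique Registry before and after registering.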
def start4 do
Registry.start_link(keys: :unique, name: Registry.UniqueCountTest)
Registry.count(Registry.UniqueCountTest)
{:ok, _} = Registry.register(Registry.UniqueCountTest, "hello", :world)
{:ok, _} = Registry.register(Registry.UniqueCountTest, "world", :world)
Registry.count(Registry.UniqueCountTest)
end
def start5 do
Registry.start_link(keys: :duplicate, name: Registry.CountMatchTest)
{:ok, _} = Registry.register(Registry.CountMatchTest, "hello", {1, :atom, 1})
{:ok, _} = Registry.register(Registry.CountMatchTest, "hello", {2, :atom, 2})
Registry.count_match(Registry.CountMatchTest, "hello", {1, :_, :_})
end
def start6 do
Registry.start_link(keys: :unique, name: Registry.CountSelectTest)
{:ok, _} = Registry.register(Registry.CountSelectTest, "hello", :value)
{:ok, _} = Registry.register(Registry.CountSelectTest, "world", :value)
Registry.count_select(Registry.CountSelectTest, [{{:_, :_, :value}, [], [true]}])
end
def start7 do
Registry.start_link(keys: :unique, name: Registry.DeleteMetaTest)
Registry.put_meta(Registry.DeleteMetaTest, :custom_key, "custom_value")
Registry.meta(Registry.DeleteMetaTest, :custom_key)
end
def start8 do
Registry.start_link(keys: :unique, name: Registry.UniqueKeysTest)
Registry.keys(Registry.UniqueKeysTest, self())
{:ok, _} = Registry.register(Registry.UniqueKeysTest, "hello", :world)
Registry.register(Registry.UniqueKeysTest, "hello", :later) # registry is :unique
Registry.keys(Registry.UniqueKeysTest, self())
end
def start9 do
Registry.start_link(keys: :unique, name: Registry.UniqueLookupTest)
Registry.lookup(Registry.UniqueLookupTest, "hello")
{:ok, _} = Registry.register(Registry.UniqueLookupTest, "hello", :world)
Registry.lookup(Registry.UniqueLookupTest, "hello")
Task.async(fn -> Registry.lookup(Registry.UniqueLookupTest, "hello") end) |> Task.await()
end
def start10 do
Registry.start_link(keys: :duplicate, name: Registry.MatchTest)
{:ok, _} = Registry.register(Registry.MatchTest, "hello", {1, :atom, 1})
{:ok, _} = Registry.register(Registry.MatchTest, "hello", {2, :atom, 2})
Registry.match(Registry.MatchTest, "hello", {1, :_, :_})
end
def start11 do
Registry.start_link(keys: :unique, name: Registry.SelectAllTest)
{:ok, _} = Registry.register(Registry.SelectAllTest, "hello", :value)
{:ok, _} = Registry.register(Registry.SelectAllTest, "world", :value)
Registry.select(Registry.SelectAllTest, [{{:"$1", :"$2", :"$3"}, [], [{{:"$1", :"$2", :"$3"}}]}])
end
def start12 do
Registry.start_link(keys: :unique, name: Registry.SelectAllTest)
{:ok, _} = Registry.register(Registry.SelectAllTest, "hello", :value)
{:ok, _} = Registry.register(Registry.SelectAllTest, "world", :value)
Registry.select(Registry.SelectAllTest, [{{:"$1", :_, :_}, [], [:"$1"]}])
end
def start13 do
Registry.start_link(keys: :unique, name: Registry.UniqueUnregisterTest)
Registry.register(Registry.UniqueUnregisterTest, "hello", :world)
Registry.keys(Registry.UniqueUnregisterTest, self())
end
def start14 do
Registry.start_link(keys: :unique, name: Registry.UniqueUnregisterMatchTest)
Registry.register(Registry.UniqueUnregisterMatchTest, "hello", :world)
Registry.keys(Registry.UniqueUnregisterMatchTest, self())
Registry.unregister_match(Registry.UniqueUnregisterMatchTest, "hello", :foo)
Registry.keys(Registry.UniqueUnregisterMatchTest, self())
end
def start15 do
Registry.start_link(keys: :duplicate, name: Registry.DuplicateUnregisterMatchTest)
Registry.register(Registry.DuplicateUnregisterMatchTest, "hello", :world_a)
Registry.register(Registry.DuplicateUnregisterMatchTest, "hello", :world_b)
Registry.register(Registry.DuplicateUnregisterMatchTest, "hello", :world_c)
Registry.keys(Registry.DuplicateUnregisterMatchTest, self())
end
def start16 do
# Assumed continuation of start15: drop only the entries whose value
# matches, then list the keys that remain registered.
Registry.unregister_match(Registry.DuplicateUnregisterMatchTest, "hello", :world_a)
Registry.keys(Registry.DuplicateUnregisterMatchTest, self())
end
end # lib/beam/registry/registry.ex
defmodule GenFSM.Behaviour do
@moduledoc """
This module is a convenience for defining GenFSM callbacks in Elixir.
A finite state machine (FSM) is responsible for reacting to events received;
GenFSM is an OTP behaviour that encapsulates common FSM
functionalities.
## Example
Below is an example of a GenFSM that runs a very simple-minded
coffee vending machine (CVM). The CVM treats all coins the same. If
you press the request button then the CVM will brew coffee, if you
have paid enough coins; if not, it will wait until you have inserted
enough coins and then it will instantly brew the coffee, since you
had already pressed the request button. As we told you - a very
simple minded CVM! And greedy too. If you insert more coins than you
need it will gladly eat them until you press the request button.
We will leave it to the service-minded reader to improve the way the CVM
works - we hereby declare a full disclaimer for any lawsuits that the
behaviour of the CVM in its original state might incur.
defmodule MyFsm do
use GenFSM.Behaviour
# keeping track of what is going on inside the CVM.
# 3 is the target price for a cup of coffee
defrecord StateData, coins: 0, price: 3
# API functions
def start_link() do
:gen_fsm.start_link({:local, :cvm}, __MODULE__, [], [])
end
def insert_coin() do
:gen_fsm.send_event(:cvm, :coin)
end
def request_coffee() do
:gen_fsm.send_event(:cvm, :request_coffee)
end
# Callbacks
def init(_args) do
{ :ok, :short_paid, StateData.new }
end
def short_paid(:coin, state_data = StateData[coins: c, price: p]) when c + 1 < p do
{ :next_state, :short_paid, state_data.coins(c + 1) }
end
def short_paid(:coin, state_data) do
{ :next_state, :paid_in_full, state_data.update_coins(&(&1 + 1)) }
end
def short_paid(:request_coffee, state_data) do
{ :next_state, :requested_short_paid, state_data }
end
def requested_short_paid(:request_coffee, state_data) do
{ :next_state, :requested_short_paid, state_data }
end
def requested_short_paid(:coin, state_data = StateData[coins: c, price: p]) when c + 1 < p do
{ :next_state, :requested_short_paid, state_data.coins(c + 1) }
end
def requested_short_paid(:coin, _state_data) do
IO.puts "Here's your coffee!"
{ :next_state, :short_paid, StateData.new }
end
def paid_in_full(:coin, state_data) do
{ :next_state, :paid_in_full, state_data.update_coins(&(&1 + 1)) }
end
def paid_in_full(:request_coffee, _state_data) do
IO.puts "Here's your coffee!"
{ :next_state, :short_paid, StateData.new }
end
end
{ :ok, _pid } = MyFsm.start_link()
MyFsm.insert_coin
#=> :ok
MyFsm.insert_coin
#=> :ok
MyFsm.request_coffee
#=> :ok
MyFsm.insert_coin
#=> :ok
#=> Here's your coffee!
Notice we never call the GenFSM callbacks directly; they are called by
OTP whenever we interact with the server through the API. `send_event` is
asynchronous, whereas `sync_send_event` is synchronous. For
a GenFSM, the different values a callback can return depend
on the type of callback.
State handling returns for `send_event` callbacks:
{ :next_state, next_state_name, new_state_data }
{ :next_state, next_state_name, new_state_data, timeout }
{ :next_state, next_state_name, new_state_data, :hibernate }
{ :stop, reason, new_state_data }
State handling returns for `sync_send_event` callbacks:
{ :reply, reply, next_state_name, new_state_data }
{ :reply, reply, next_state_name, new_state_data, timeout }
{ :reply, reply, next_state_name, new_state_data, :hibernate }
{ :next_state, next_state_name, new_state_data }
{ :next_state, next_state_name, new_state_data, timeout }
{ :next_state, next_state_name, new_state_data, :hibernate }
{ :stop, reason, reply, new_state_data }
{ :stop, reason, new_state_data }
There are 6 callbacks required to be implemented in a GenFSM, plus 1
or 2 for each state. The `GenFSM.Behaviour` module defines default
implementations of `handle_event`, `handle_sync_event`, `handle_info`,
`terminate` and `code_change` for you. The list of callbacks is:
* `init(args)` - invoked when the FSM is started;
* `handle_sync_event(event, from, state_name, state_data)` - invoked to
handle `sync_send_all_state_event` messages;
* `handle_event(event, state_name, state_data)` - invoked to handle
`send_all_state_event` messages;
* `handle_info(msg, state_name, state_data)` - handle all other
messages which are normally received by processes;
* `terminate(reason, state_name, state_data)` - called when the FSM
is about to terminate, useful for cleaning up;
* `code_change(old_vsn, state, extra)` - called when the application
code is being upgraded live (hot code swap);
Unlike `GenServer` and `GenEvent`, the callback `init/1` is not
implemented by default, as it requires the next state to be returned.
For each state you need to define either or both of these:
* `state_name(event, state_data)` - invoked to handle
`send_event` messages;
`state_name(event, from, state_data)` - invoked to handle
`sync_send_event` messages;
If you send asynchronous events you only need to implement the
`state_name/2` variant and vice-versa for synchronous events and
`state_name/3`. Keep in mind that if you mix `send_event` and
`sync_send_event` the best thing to do is to implement both
callbacks for all states.
Starting and sending messages to the GenFSM is done via Erlang's
`:gen_fsm` module. For more information, please refer to the
following:
* http://www.erlang.org/doc/man/gen_fsm.html
* http://www.erlang.org/doc/design_principles/fsm.html
* http://learnyousomeerlang.com/finite-state-machines
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
@behaviour :gen_fsm
@doc false
def handle_event(event, state_name, state_data) do
{ :stop, {:bad_event, state_name, event}, state_data }
end
@doc false
def handle_sync_event(event, from, state_name, state_data) do
{ :stop, {:bad_sync_event, state_name, event}, state_data }
end
@doc false
def handle_info(_msg, state_name, state_data) do
{ :next_state, state_name, state_data }
end
@doc false
def terminate(_reason, _state_name, _state_data) do
:ok
end
@doc false
def code_change(_old, state_name, state_data, _extra) do
{ :ok, state_name, state_data }
end
defoverridable [handle_event: 3, handle_sync_event: 4,
handle_info: 3, terminate: 3, code_change: 4]
end
end
end # lib/elixir/lib/gen_fsm/behaviour.ex
defmodule Ash.Api.Dsl do
@resource %Ash.Dsl.Entity{
name: :resource,
describe: "A reference to a resource",
target: Ash.Api.ResourceReference,
args: [:resource],
examples: [
"resource MyApp.User"
],
schema: [
resource: [
type: :atom,
required: true,
doc: "The module of the resource"
]
]
}
@execution %Ash.Dsl.Section{
name: :execution,
describe: "Options for how requests are executed using this Api",
examples: [
"""
execution do
timeout 30_000
end
"""
],
schema: [
timeout: [
type: :timeout,
doc: "The default timeout to use for requests using this API.",
default: :infinity
]
]
}
@resources %Ash.Dsl.Section{
name: :resources,
describe: "List the resources present in this API",
examples: [
"""
resources do
resource MyApp.User
resource MyApp.Post
resource MyApp.Comment
end
"""
],
schema: [
allow_unregistered?: [
type: :boolean,
default: false,
doc: """
This is still experimental, but will be supported if you run into any issues.
By default, an api will only work with resources that are explicitly included in the provided registry. In order to separate your
application into multiple domains, you may wish to "mix and match" your resources across contexts. Specifying this option allows you
to refer to resources in different apis in your resources, and allows providing any resource to api actions (to facilitate that requirement).
Be sure to remove the Ash.Registry.ResourceValidations extension from your registry as well.
"""
],
registry: [
type: {:behaviour, Ash.Registry},
doc: """
Allows declaring that only the modules in a certain registry should be allowed to work with this Api.
This option is ignored if any explicit resources are included in the api, so everything is either in the registry
or in the api. See the docs on `Ash.Registry` for what the registry is used for.
To optimize for compile times, you can place the connection from the api to the registry in application configuration.
To accomplish this:
1. Configure an `otp_app` when using your api, e.g `use Ash.Api, otp_app: :my_app`
2. Add application config to set up the connection
```elixir
config :my_app, MyApp.Api,
resources: [
registry: MyApp.Api.Registry
]
```
"""
]
],
modules: [:registry],
deprecations: [
resource: """
Please define your resources in an `Ash.Registry`. For example:
# my_app/my_api/registry.ex
defmodule MyApp.MyApi.Registry do
use Ash.Registry,
extensions: [Ash.Registry.ResourceValidations]
entries do
entry MyApp.Post
entry MyApp.Comment
end
end
# In your api module
resources do
registry MyApp.MyApi.Registry
end
"""
],
entities: [
@resource
]
}
@sections [@resources, @execution]
@moduledoc """
A small DSL for declaring APIs
Apis are the entrypoints for working with your resources.
Apis may optionally include a list of resources, in which case they can be
used as an `Ash.Registry` in various places. This is for backwards compatibility,
but if at all possible you should define an `Ash.Registry` if you are using an extension
that requires a list of resources. For example, most extensions look for two application
environment variables called `:ash_apis` and `:ash_registries` to find any potential registries
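For example, a minimal api module may look as follows (the module names
are illustrative):

    defmodule MyApp.Api do
      use Ash.Api

      resources do
        registry MyApp.Api.Registry
      end
    end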
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
use Ash.Dsl.Extension, sections: @sections
end # lib/ash/api/dsl.ex
defmodule AWS.STS do
@moduledoc """
AWS Security Token Service
AWS Security Token Service (STS) enables you to request temporary,
limited-privilege credentials for AWS Identity and Access Management (IAM) users
or for users that you authenticate (federated users).
This guide provides descriptions of the STS API. For more information about
using this service, see [Temporary Security Credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
"""
@doc """
Returns a set of temporary security credentials that you can use to access AWS
resources that you might not normally have access to.
These temporary credentials consist of an access key ID, a secret access key,
and a security token. Typically, you use `AssumeRole` within your account or for
cross-account access. For a comparison of `AssumeRole` with other API operations
that produce temporary credentials, see [Requesting Temporary Security Credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS API operations](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
You cannot use AWS account root user credentials to call `AssumeRole`. You must
use credentials for an IAM user or an IAM role to call `AssumeRole`.
For cross-account access, imagine that you own multiple accounts and need to
access resources in each account. You could create long-term credentials in each
account to access those resources. However, managing all those credentials and
remembering which one can access which account can be time consuming. Instead,
you can create one set of long-term credentials in one account. Then use
temporary security credentials to access all the other accounts by assuming
roles in those accounts. For more information about roles, see [IAM Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) in the
*IAM User Guide*.
## Session Duration
By default, the temporary security credentials created by `AssumeRole` last for
one hour. However, you can use the optional `DurationSeconds` parameter to
specify the duration of your session. You can provide a value from 900 seconds
(15 minutes) up to the maximum session duration setting for the role. This
setting can have a value from 1 hour to 12 hours. To learn how to view the
maximum value for your role, see [View the Maximum Session Duration Setting for a
Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
in the *IAM User Guide*. The maximum session duration limit applies when you use
the `AssumeRole*` API operations or the `assume-role*` CLI commands. However the
limit does not apply when you use those operations to create a console URL. For
more information, see [Using IAM Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in
the *IAM User Guide*.
## Permissions
The temporary security credentials created by `AssumeRole` can be used to make
API calls to any AWS service with the following exception: You cannot call the
AWS STS `GetFederationToken` or `GetSessionToken` API operations.
(Optional) You can pass inline or managed [session policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
to this operation. You can pass a single JSON policy document to use as an
inline session policy. You can also specify up to 10 managed policies to use as
managed session policies. The plain text that you use for both inline and
managed session policies can't exceed 2,048 characters. Passing policies to this
operation returns new temporary credentials. The resulting session's permissions
are the intersection of the role's identity-based policy and the session
policies. You can use the role's temporary credentials in subsequent AWS API
calls to access resources in the account that owns the role. You cannot use
session policies to grant more permissions than those allowed by the
identity-based policy of the role that is being assumed. For more information,
see [Session Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
in the *IAM User Guide*.
To assume a role from a different account, your AWS account must be trusted by
the role. The trust relationship is defined in the role's trust policy when the
role is created. That trust policy states which accounts are allowed to delegate
that access to users in the account.
A user who wants to access a role in a different account must also have
permissions that are delegated from the user account administrator. The
administrator must attach a policy that allows the user to call `AssumeRole` for
the ARN of the role in the other account. If the user is in the same account as
the role, then you can do either of the following:
* Attach a policy to the user (identical to the previous user in a
different account).
* Add the user as a principal directly in the role's trust policy.
In this case, the trust policy acts as an IAM resource-based policy. Users in
the same account as the role do not need explicit permission to assume the role.
For more information about trust policies and resource-based policies, see [IAM Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
in the *IAM User Guide*.
## Tags
(Optional) You can pass tag key-value pairs to your session. These tags are
called session tags. For more information about session tags, see [Passing Session Tags in
STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in
the *IAM User Guide*.
An administrator must grant you the permissions necessary to pass session tags.
The administrator can also create granular permissions to allow you to pass only
specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access
Control](https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
in the *IAM User Guide*.
You can set the session tags as transitive. Transitive tags persist during role
chaining. For more information, see [Chaining Roles with Session Tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
in the *IAM User Guide*.
## Using MFA with AssumeRole
(Optional) You can include multi-factor authentication (MFA) information when
you call `AssumeRole`. This is useful for cross-account scenarios to ensure that
the user that assumes the role has been authenticated with an AWS MFA device. In
that scenario, the trust policy of the role being assumed includes a condition
that tests for MFA authentication. If the caller does not include valid MFA
information, the request to assume the role is denied. The condition in a trust
policy that tests for MFA authentication might look like the following example.
`"Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}`
For more information, see [Configuring MFA-Protected API Access](https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
in the *IAM User Guide* guide.
To use MFA with `AssumeRole`, you pass values for the `SerialNumber` and
`TokenCode` parameters. The `SerialNumber` value identifies the user's hardware
or virtual MFA device. The `TokenCode` is the time-based one-time password
(TOTP) that the MFA device produces.
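
## Example

A minimal sketch; the client credentials, role ARN and session name below
are placeholders, not values from this library:

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")

    {:ok, result, _http_response} =
      AWS.STS.assume_role(client, %{
        "RoleArn" => "arn:aws:iam::123456789012:role/example-role",
        "RoleSessionName" => "example-session"
      })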
"""
def assume_role(client, input, options \\ []) do
request(client, "AssumeRole", input, options)
end
@doc """
Returns a set of temporary security credentials for users who have been
authenticated via a SAML authentication response.
This operation provides a mechanism for tying an enterprise identity store or
directory to role-based AWS access without user-specific credentials or
configuration. For a comparison of `AssumeRoleWithSAML` with the other API
operations that produce temporary credentials, see [Requesting Temporary Security
Credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS API operations](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
The temporary security credentials returned by this operation consist of an
access key ID, a secret access key, and a security token. Applications can use
these temporary security credentials to sign calls to AWS services.
## Session Duration
By default, the temporary security credentials created by `AssumeRoleWithSAML`
last for one hour. However, you can use the optional `DurationSeconds` parameter
to specify the duration of your session. Your role session lasts for the
duration that you specify, or until the time specified in the SAML
authentication response's `SessionNotOnOrAfter` value, whichever is shorter. You
can provide a `DurationSeconds` value from 900 seconds (15 minutes) up to the
maximum session duration setting for the role. This setting can have a value
from 1 hour to 12 hours. To learn how to view the maximum value for your role,
see [View the Maximum Session Duration Setting for a Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
in the *IAM User Guide*. The maximum session duration limit applies when you use
the `AssumeRole*` API operations or the `assume-role*` CLI commands. However the
limit does not apply when you use those operations to create a console URL. For
more information, see [Using IAM Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in
the *IAM User Guide*.
## Permissions
The temporary security credentials created by `AssumeRoleWithSAML` can be used
to make API calls to any AWS service with the following exception: you cannot
call the STS `GetFederationToken` or `GetSessionToken` API operations.
(Optional) You can pass inline or managed [session policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
to this operation. You can pass a single JSON policy document to use as an
inline session policy. You can also specify up to 10 managed policies to use as
managed session policies. The plain text that you use for both inline and
managed session policies can't exceed 2,048 characters. Passing policies to this
operation returns new temporary credentials. The resulting session's permissions
are the intersection of the role's identity-based policy and the session
policies. You can use the role's temporary credentials in subsequent AWS API
calls to access resources in the account that owns the role. You cannot use
session policies to grant more permissions than those allowed by the
identity-based policy of the role that is being assumed. For more information,
see [Session Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
in the *IAM User Guide*.
Calling `AssumeRoleWithSAML` does not require the use of AWS security
credentials. The identity of the caller is validated by using keys in the
metadata document that is uploaded for the SAML provider entity for your
identity provider.
Calling `AssumeRoleWithSAML` can result in an entry in your AWS CloudTrail logs.
The entry includes the value in the `NameID` element of the SAML assertion. We
recommend that you use a `NameIDType` that is not associated with any personally
identifiable information (PII). For example, you could instead use the
persistent identifier (`urn:oasis:names:tc:SAML:2.0:nameid-format:persistent`).
## Tags
(Optional) You can configure your IdP to pass attributes into your SAML
assertion as session tags. Each session tag consists of a key name and an
associated value. For more information about session tags, see [Passing Session Tags in
STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in
the *IAM User Guide*.
You can pass up to 50 session tags. The plain text session tag keys can’t exceed
128 characters and the values can’t exceed 256 characters. For these and
additional limits, see [IAM and STS Character Limits](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
in the *IAM User Guide*.
An AWS conversion compresses the passed session policies and session tags into a
packed binary format that has a separate limit. Your request can fail for this
limit even if your plain text meets the other requirements. The
`PackedPolicySize` response element indicates by percentage how close the
policies and tags for your request are to the upper size limit.
You can pass a session tag with the same key as a tag that is attached to the
role. When you do, session tags override the role's tags with the same key.
An administrator must grant you the permissions necessary to pass session tags.
The administrator can also create granular permissions to allow you to pass only
specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access
Control](https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
in the *IAM User Guide*.
You can set the session tags as transitive. Transitive tags persist during role
chaining. For more information, see [Chaining Roles with Session Tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
in the *IAM User Guide*.
## SAML Configuration
Before your application can call `AssumeRoleWithSAML`, you must configure your
SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
you must use AWS Identity and Access Management (IAM) to create a SAML provider
entity in your AWS account that represents your identity provider. You must also
create an IAM role that specifies this SAML provider in its trust policy.
For more information, see the following resources:
* [About SAML 2.0-based Federation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
in the *IAM User Guide*.
* [Creating SAML Identity Providers](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
in the *IAM User Guide*.
* [Configuring a Relying Party and Claims](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
in the *IAM User Guide*.
* [Creating a Role for SAML 2.0 Federation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
in the *IAM User Guide*.
"""
def assume_role_with_s_a_m_l(client, input, options \\ []) do
request(client, "AssumeRoleWithSAML", input, options)
end
@doc """
Returns a set of temporary security credentials for users who have been
authenticated in a mobile or web application with a web identity provider.
Example providers include Amazon Cognito, Login with Amazon, Facebook, Google,
or any OpenID Connect-compatible identity provider.
For mobile applications, we recommend that you use Amazon Cognito. You can use
Amazon Cognito with the [AWS SDK for iOS Developer Guide](http://aws.amazon.com/sdkforios/) and the [AWS SDK for Android Developer Guide](http://aws.amazon.com/sdkforandroid/) to uniquely identify a user. You
can also supply the user with a consistent identity throughout the lifetime of
an application.
To learn more about Amazon Cognito, see [Amazon Cognito Overview](https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
in *AWS SDK for Android Developer Guide* and [Amazon Cognito Overview](https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
in the *AWS SDK for iOS Developer Guide*.
Calling `AssumeRoleWithWebIdentity` does not require the use of AWS security
credentials. Therefore, you can distribute an application (for example, on
mobile devices) that requests temporary security credentials without including
long-term AWS credentials in the application. You also don't need to deploy
server-based proxy services that use long-term AWS credentials. Instead, the
identity of the caller is validated by using a token from the web identity
provider. For a comparison of `AssumeRoleWithWebIdentity` with the other API
operations that produce temporary credentials, see [Requesting Temporary Security
Credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS API operations](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
The temporary security credentials returned by this API consist of an access key
ID, a secret access key, and a security token. Applications can use these
temporary security credentials to sign calls to AWS service API operations.
## Session Duration
By default, the temporary security credentials created by
`AssumeRoleWithWebIdentity` last for one hour. However, you can use the optional
`DurationSeconds` parameter to specify the duration of your session. You can
provide a value from 900 seconds (15 minutes) up to the maximum session duration
setting for the role. This setting can have a value from 1 hour to 12 hours. To
learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a
Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
in the *IAM User Guide*. The maximum session duration limit applies when you use
the `AssumeRole*` API operations or the `assume-role*` CLI commands. However the
limit does not apply when you use those operations to create a console URL. For
more information, see [Using IAM Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in
the *IAM User Guide*.
## Permissions
The temporary security credentials created by `AssumeRoleWithWebIdentity` can be
used to make API calls to any AWS service with the following exception: you
cannot call the STS `GetFederationToken` or `GetSessionToken` API operations.
(Optional) You can pass inline or managed [session policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
to this operation. You can pass a single JSON policy document to use as an
inline session policy. You can also specify up to 10 managed policies to use as
managed session policies. The plain text that you use for both inline and
managed session policies can't exceed 2,048 characters. Passing policies to this
operation returns new temporary credentials. The resulting session's permissions
are the intersection of the role's identity-based policy and the session
policies. You can use the role's temporary credentials in subsequent AWS API
calls to access resources in the account that owns the role. You cannot use
session policies to grant more permissions than those allowed by the
identity-based policy of the role that is being assumed. For more information,
see [Session Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
in the *IAM User Guide*.
## Tags
(Optional) You can configure your IdP to pass attributes into your web identity
token as session tags. Each session tag consists of a key name and an associated
value. For more information about session tags, see [Passing Session Tags in STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in
the *IAM User Guide*.
You can pass up to 50 session tags. The plain text session tag keys can’t exceed
128 characters and the values can’t exceed 256 characters. For these and
additional limits, see [IAM and STS Character Limits](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
in the *IAM User Guide*.
An AWS conversion compresses the passed session policies and session tags into a
packed binary format that has a separate limit. Your request can fail for this
limit even if your plain text meets the other requirements. The
`PackedPolicySize` response element indicates by percentage how close the
policies and tags for your request are to the upper size limit.
You can pass a session tag with the same key as a tag that is attached to the
role. When you do, the session tag overrides the role tag with the same key.
An administrator must grant you the permissions necessary to pass session tags.
The administrator can also create granular permissions to allow you to pass only
specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access
Control](https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
in the *IAM User Guide*.
You can set the session tags as transitive. Transitive tags persist during role
chaining. For more information, see [Chaining Roles with Session Tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
in the *IAM User Guide*.
## Identities
Before your application can call `AssumeRoleWithWebIdentity`, you must have an
identity token from a supported identity provider and create a role that the
application can assume. The role that your application assumes must trust the
identity provider that is associated with the identity token. In other words,
the identity provider must be specified in the role's trust policy.
Calling `AssumeRoleWithWebIdentity` can result in an entry in your AWS
CloudTrail logs. The entry includes the
[Subject](http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided Web Identity Token. We recommend that you avoid using any personally
identifiable information (PII) in this field. For example, you could instead use
a GUID or a pairwise identifier, as [suggested in the OIDC
specification](http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
For more information about how to use web identity federation and the
`AssumeRoleWithWebIdentity` API, see the following resources:
* [Using Web Identity Federation API Operations for Mobile Apps](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
and [Federation Through a Web-based Identity Provider](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
* [ Web Identity Federation Playground](https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
Walk through the process of authenticating through Login with Amazon, Facebook,
or Google, getting temporary security credentials, and then using those
credentials to make a request to AWS.
* [AWS SDK for iOS Developer Guide](http://aws.amazon.com/sdkforios/) and [AWS SDK for Android Developer Guide](http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps
that show how to invoke the identity providers. The toolkits then show how to
use the information from these providers to get and use temporary security
credentials.
* [Web Identity Federation with Mobile Applications](http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
This article discusses web identity federation and shows an example of how to
use web identity federation to get access to content in Amazon S3.
"""
def assume_role_with_web_identity(client, input, options \\ []) do
request(client, "AssumeRoleWithWebIdentity", input, options)
end
@doc """
Decodes additional information about the authorization status of a request from
an encoded message returned in response to an AWS request.
For example, if a user is not authorized to perform an operation that he or she
has requested, the request returns a `Client.UnauthorizedOperation` response (an
HTTP 403 response). Some AWS operations additionally return an encoded message
that can provide details about this authorization failure.
Only certain AWS operations return an encoded authorization message. The
documentation for an individual operation indicates whether that operation
returns an encoded message in addition to returning an HTTP code.
The message is encoded because the details of the authorization status can
constitute privileged information that the user who requested the operation
should not see. To decode an authorization status message, a user must be
granted permissions via an IAM policy to request the
`DecodeAuthorizationMessage` (`sts:DecodeAuthorizationMessage`) action.
The decoded message includes the following type of information:
* Whether the request was denied due to an explicit deny or due to
the absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or
Denied](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
in the *IAM User Guide*.
* The principal who made the request.
* The requested action.
* The requested resource.
* The values of condition keys in the context of the user's request.
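
## Example

A minimal sketch; `client` and `encoded_message` are assumed to be available:

    {:ok, decoded, _http_response} =
      AWS.STS.decode_authorization_message(client, %{"EncodedMessage" => encoded_message})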
"""
def decode_authorization_message(client, input, options \\ []) do
request(client, "DecodeAuthorizationMessage", input, options)
end
@doc """
Returns the account identifier for the specified access key ID.
Access keys consist of two parts: an access key ID (for example,
`AKIAIOSFODNN7EXAMPLE`) and a secret access key (for example,
`<KEY>`). For more information about access
keys, see [Managing Access Keys for IAM Users](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
in the *IAM User Guide*.
When you pass an access key ID to this operation, it returns the ID of the AWS
account to which the keys belong. Access key IDs beginning with `AKIA` are
long-term credentials for an IAM user or the AWS account root user. Access key
IDs beginning with `ASIA` are temporary credentials that are created using STS
operations. If the account in the response belongs to you, you can sign in as
the root user and review your root user access keys. Then, you can pull a
[credentials report](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
to learn which IAM user owns the keys. To learn who requested the temporary
credentials for an `ASIA` access key, view the STS events in your [CloudTrail logs](https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
in the *IAM User Guide*.
This operation does not indicate the state of the access key. The key might be
active, inactive, or deleted. Active keys might not have permissions to perform
an operation. Providing a deleted access key might return an error that the key
doesn't exist.
"""
def get_access_key_info(client, input, options \\ []) do
request(client, "GetAccessKeyInfo", input, options)
end
@doc """
Returns details about the IAM user or role whose credentials are used to call
the operation.
No permissions are required to perform this operation. If an administrator adds
a policy to your IAM user or role that explicitly denies access to the
`sts:GetCallerIdentity` action, you can still perform this operation.
Permissions are not required because the same information is returned when an
IAM user or role is denied access. To view an example response, see [I Am Not Authorized to Perform:
iam:DeleteVirtualMFADevice](https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
in the *IAM User Guide*.
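
## Example

A minimal sketch; `client` is assumed to be an already configured `AWS.Client`:

    {:ok, identity, _http_response} = AWS.STS.get_caller_identity(client, %{})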
"""
def get_caller_identity(client, input, options \\ []) do
request(client, "GetCallerIdentity", input, options)
end
@doc """
Returns a set of temporary security credentials (consisting of an access key ID,
a secret access key, and a security token) for a federated user.
A typical use is in a proxy application that gets temporary security credentials
on behalf of distributed applications inside a corporate network. You must call
the `GetFederationToken` operation using the long-term security credentials of
an IAM user. As a result, this call is appropriate in contexts where those
credentials can be safely stored, usually in a server-based application. For a
comparison of `GetFederationToken` with the other API operations that produce
temporary credentials, see [Requesting Temporary Security Credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS API operations](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
You can create a mobile-based or browser-based app that can authenticate users
using a web identity provider like Login with Amazon, Facebook, Google, or an
OpenID Connect-compatible identity provider. In this case, we recommend that you
use [Amazon Cognito](http://aws.amazon.com/cognito/) or `AssumeRoleWithWebIdentity`. For more information, see [Federation Through a
Web-based Identity
Provider](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
in the *IAM User Guide*.
You can also call `GetFederationToken` using the security credentials of an AWS
account root user, but we do not recommend it. Instead, we recommend that you
create an IAM user for the purpose of the proxy application. Then attach a
policy to the IAM user that limits federated users to only the actions and
resources that they need to access. For more information, see [IAM Best Practices](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
in the *IAM User Guide*.
## Session duration
The temporary credentials are valid for the specified duration, from 900 seconds
(15 minutes) up to a maximum of 129,600 seconds (36 hours). The default session
duration is 43,200 seconds (12 hours). Temporary credentials that are obtained
by using AWS account root user credentials have a maximum duration of 3,600
seconds (1 hour).
## Permissions
You can use the temporary credentials created by `GetFederationToken` in any AWS
service except the following:
* You cannot call any IAM operations using the AWS CLI or the AWS
API.
* You cannot call any STS operations except `GetCallerIdentity`.
You must pass an inline or managed [session policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
to this operation. You can pass a single JSON policy document to use as an
inline session policy. You can also specify up to 10 managed policies to use as
managed session policies. The plain text that you use for both inline and
managed session policies can't exceed 2,048 characters.
Though the session policy parameters are optional, if you do not pass a policy,
then the resulting federated user session has no permissions. When you pass
session policies, the session permissions are the intersection of the IAM user
policies and the session policies that you pass. This gives you a way to further
restrict the permissions for a federated user. You cannot use session policies
to grant more permissions than those that are defined in the permissions policy
of the IAM user. For more information, see [Session Policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
in the *IAM User Guide*. For information about using `GetFederationToken` to
create temporary security credentials, see [GetFederationToken—Federation Through a Custom Identity
Broker](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
You can use the credentials to access a resource that has a resource-based
policy. If that policy specifically references the federated user session in the
`Principal` element of the policy, the session has the permissions allowed by
the policy. These permissions are granted in addition to the permissions granted
by the session policies.
## Tags
(Optional) You can pass tag key-value pairs to your session. These are called
session tags. For more information about session tags, see [Passing Session Tags in STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
in the *IAM User Guide*.
An administrator must grant you the permissions necessary to pass session tags.
The administrator can also create granular permissions to allow you to pass only
specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access
Control](https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
in the *IAM User Guide*.
Tag key–value pairs are not case sensitive, but case is preserved. This means
that you cannot have separate `Department` and `department` tag keys. Assume
that the user that you are federating has the `Department`=`Marketing` tag and
you pass the `department`=`engineering` session tag. `Department` and
`department` are not saved as separate tags, and the session tag passed in the
request takes precedence over the user tag.
"""
def get_federation_token(client, input, options \\ []) do
request(client, "GetFederationToken", input, options)
end
@doc """
Returns a set of temporary credentials for an AWS account or IAM user.
The credentials consist of an access key ID, a secret access key, and a security
token. Typically, you use `GetSessionToken` if you want to use MFA to protect
programmatic calls to specific AWS API operations like Amazon EC2
`StopInstances`. MFA-enabled IAM users would need to call `GetSessionToken` and
submit an MFA code that is associated with their MFA device. Using the temporary
security credentials that are returned from the call, IAM users can then make
programmatic calls to API operations that require MFA authentication. If you do
not supply a correct MFA code, then the API returns an access denied error. For
a comparison of `GetSessionToken` with the other API operations that produce
temporary credentials, see [Requesting Temporary Security Credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
and [Comparing the AWS STS API operations](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
in the *IAM User Guide*.
## Session Duration
The `GetSessionToken` operation must be called by using the long-term AWS
security credentials of the AWS account root user or an IAM user. Credentials
that are created by IAM users are valid for the duration that you specify. This
duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
based on account credentials can range from 900 seconds (15 minutes) up to 3,600
seconds (1 hour), with a default of 1 hour.
## Permissions
The temporary security credentials created by `GetSessionToken` can be used to
make API calls to any AWS service with the following exceptions:
* You cannot call any IAM API operations unless MFA authentication
information is included in the request.
* You cannot call any STS API *except* `AssumeRole` or
`GetCallerIdentity`.
We recommend that you do not call `GetSessionToken` with AWS account root user
credentials. Instead, follow our [best practices](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
by creating one or more IAM users, giving them the necessary permissions, and
using IAM users for everyday interaction with AWS.
The credentials that are returned by `GetSessionToken` are based on permissions
associated with the user whose credentials were used to call the operation. If
`GetSessionToken` is called using AWS account root user credentials, the
temporary credentials have root user permissions. Similarly, if
`GetSessionToken` is called using the credentials of an IAM user, the temporary
credentials have the same permissions as the IAM user.
For more information about using `GetSessionToken` to create temporary
credentials, go to [Temporary Credentials for Users in Untrusted Environments](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
in the *IAM User Guide*.
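
## Example

A minimal sketch; the MFA serial number and token code below are placeholders:

    {:ok, result, _http_response} =
      AWS.STS.get_session_token(client, %{
        "DurationSeconds" => 3600,
        "SerialNumber" => "arn:aws:iam::123456789012:mfa/example-user",
        "TokenCode" => "123456"
      })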
"""
def get_session_token(client, input, options \\ []) do
request(client, "GetSessionToken", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "sts"}
host = build_host("sts", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-www-form-urlencoded"}
]
input = Map.merge(input, %{"Action" => action, "Version" => "2011-06-15"})
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :query)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :xml)
end
end # lib/aws/generated/sts.ex
defmodule ExRabbitMQ.AST.Consumer.GenServer do
@moduledoc """
AST holding module for the consumer behaviour when the surrounding consumer is a GenServer.
"""
@doc """
Produces part of the AST for the consumer behaviour when the consumer is a GenServer.
It holds GenServer handle_info callbacks and a few default implementations.
Specifically, it handles the basic_deliver and basic_cancel AMQP events.
It also responds to connection and channel events, trying to keep a channel open when a connection is available.
"""
def ast do
quote location: :keep do
alias ExRabbitMQ.Config.Environment, as: XRMQEnvironmentConfig
alias ExRabbitMQ.State, as: XRMQState
@impl true
def handle_info({:basic_deliver, payload, meta}, state) do
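# When accounting is enabled, record the approximate size of each consumed
# message (in KB) so the process can later decide whether to hibernate.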
if XRMQEnvironmentConfig.accounting_enabled() and is_binary(payload) do
payload
|> byte_size()
|> Kernel./(1_024)
|> Float.round()
|> trunc()
|> XRMQState.add_kb_of_messages_seen_so_far()
end
callback_result = xrmq_basic_deliver(payload, meta, state)
if XRMQEnvironmentConfig.accounting_enabled() and XRMQState.hibernate?(),
do: xrmq_on_hibernation_threshold_reached(callback_result),
else: callback_result
end
@impl true
def handle_info({:basic_cancel, cancellation_info}, state) do
xrmq_basic_cancel(cancellation_info, state)
end
@impl true
def handle_info({:xrmq_connection, {:new, connection}}, state) do
state = xrmq_on_connection_opened(connection, state)
{:noreply, state}
end
@impl true
def handle_info({:xrmq_connection, {:open, connection}}, state) do
case xrmq_open_channel_setup_consume(state) do
{:ok, state} ->
state = xrmq_on_connection_reopened(connection, state)
state = xrmq_flush_buffered_messages(state)
{:noreply, state}
{:error, reason, state} ->
case xrmq_on_connection_reopened_consume_failed(reason, state) do
{:cont, state} -> {:noreply, state}
{:halt, reason, state} -> {:stop, reason, state}
end
end
end
@impl true
def handle_info({:xrmq_connection, {:closed, _}}, state) do
XRMQState.set_connection_status(:disconnected)
state = xrmq_on_connection_closed(state)
# WE WILL CONTINUE HANDLING THIS EVENT WHEN WE HANDLE THE CHANNEL DOWN EVENT
{:noreply, state}
end
@impl true
def handle_info({:DOWN, ref, :process, pid, reason}, state) do
case XRMQState.get_channel_info() do
{_, ^ref} ->
XRMQState.set_channel_info(nil, nil)
case xrmq_open_channel_setup_consume(state) do
{:ok, state} ->
state = xrmq_flush_buffered_messages(state)
{:noreply, state}
{:error, reason, state} ->
case xrmq_on_channel_reopened_consume_failed(reason, state) do
{:cont, state} -> {:noreply, state}
{:halt, reason, state} -> {:stop, reason, state}
end
end
_ ->
send(self(), {{:DOWN, ref, :process, pid, reason}})
{:noreply, state}
end
end
@impl true
def handle_info({:xrmq_try_init, opts}, state), do: xrmq_try_init_consumer(opts, state)
@impl true
def handle_continue({:xrmq_try_init, opts, continuation}, state) do
case xrmq_try_init_consumer(opts, state) do
result when continuation === nil -> result
{action, state} -> {action, state, continuation}
error -> error
end
end
end
end
end # lib/ex_rabbit_m_q/a_s_t/consumer/gen_server.ex
defmodule Cldr.Calendar.Base.Month do
@moduledoc false
alias Cldr.Calendar.Config
alias Cldr.Calendar.Base
alias Calendar.ISO
alias Cldr.Math
@days_in_week 7
@quarters_in_year 4
@months_in_quarter 3
@weeks_in_quarter 13
@iso_week_first_day 1
@iso_week_min_days 4
@january 1
defmacro __using__(options \\ []) do
quote bind_quoted: [options: options] do
@options options
@before_compile Cldr.Calendar.Compiler.Month
end
end
def valid_date?(year, month, day, %Config{month_of_year: 1}) do
Calendar.ISO.valid_date?(year, month, day)
end
def valid_date?(year, month, day, config) do
{year, month, day} = date_to_iso_date(year, month, day, config)
Calendar.ISO.valid_date?(year, month, day)
end
def year_of_era(year, config) do
{_, year} = Cldr.Calendar.start_end_gregorian_years(year, config)
Calendar.ISO.year_of_era(year)
end
def quarter_of_year(_year, month, _day, _config) do
Float.ceil(month / @months_in_quarter)
|> trunc
end
def month_of_year(_year, month, _day, _config) do
month
end
def week_of_year(year, month, day, %Config{day_of_week: :first} = config) do
this_day = date_to_iso_days(year, month, day, config)
first_day = date_to_iso_days(year, 1, 1, config)
week = div(this_day - first_day, @days_in_week) + 1
{year, week}
end
def week_of_year(year, month, day, config) do
iso_days = date_to_iso_days(year, month, day, config)
first_gregorian_day_of_year = Base.Week.first_gregorian_day_of_year(year, config)
last_gregorian_day_of_year = Base.Week.last_gregorian_day_of_year(year, config)
cond do
iso_days < first_gregorian_day_of_year ->
if Base.Week.long_year?(year - 1, config), do: {year - 1, 53}, else: {year - 1, 52}
iso_days > last_gregorian_day_of_year ->
{year + 1, 1}
true ->
week = div(iso_days - first_gregorian_day_of_year, @days_in_week) + 1
{year, week}
end
end
def iso_week_of_year(year, month, day) do
week_of_year(year, month, day, %Config{
day_of_week: @iso_week_first_day,
min_days_in_first_week: @iso_week_min_days,
month_of_year: @january
})
end
def week_of_month(year, month, day, %Config{day_of_week: :first} = config) do
this_day = date_to_iso_days(year, month, day, config)
first_day = date_to_iso_days(year, month, 1, config)
week = div(this_day - first_day, @days_in_week) + 1
{month, week}
end
def week_of_month(year, month, day, config) do
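# Derive the week within the month from the week of the year: split the year
# week into whole quarters plus a remainder, locate the month inside that
# remainder, then subtract the weeks consumed by the preceding months.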
{_year, week} = week_of_year(year, month, day, config)
{quarters, weeks_remaining_in_quarter} = Math.div_amod(week, @weeks_in_quarter)
month_in_quarter = Base.Week.month_from_weeks(weeks_remaining_in_quarter, config)
month = quarters * @months_in_quarter + month_in_quarter
week = weeks_remaining_in_quarter - Base.Week.weeks_from_months(month_in_quarter - 1, config)
{month, week}
end
def day_of_era(year, month, day, config) do
{year, month, day} = date_to_iso_date(year, month, day, config)
Calendar.ISO.day_of_era(year, month, day)
end
def day_of_year(year, month, day, config) do
{iso_year, iso_month, iso_day} = date_to_iso_date(year, month, day, config)
iso_days = Calendar.ISO.date_to_iso_days(iso_year, iso_month, iso_day)
iso_days - first_gregorian_day_of_year(year, config) + 1
end
def day_of_week(year, month, day, config) do
{year, month, day} = date_to_iso_date(year, month, day, config)
ISO.day_of_week(year, month, day)
end
def months_in_year(year, _config) do
Calendar.ISO.months_in_year(year)
end
def weeks_in_year(year, %Config{day_of_week: :first} = config) do
first_day = first_gregorian_day_of_year(year, config)
last_day = last_gregorian_day_of_year(year, config)
Float.ceil((last_day - first_day) / @days_in_week) |> trunc
end
def weeks_in_year(year, config) do
if Base.Week.long_year?(year, config) do
Base.Week.weeks_in_long_year()
else
Base.Week.weeks_in_normal_year()
end
end
def days_in_year(year, config) do
if leap_year?(year, config), do: 366, else: 365
end
def days_in_month(year, month, config) do
{iso_year, iso_month, _day} = date_to_iso_date(year, month, 1, config)
ISO.days_in_month(iso_year, iso_month)
end
def days_in_week do
@days_in_week
end
def days_in_week(_year, _week) do
@days_in_week
end
def year(year, config) do
calendar = config.calendar
last_month = calendar.months_in_year(year)
days_in_last_month = calendar.days_in_month(year, last_month)
with {:ok, start_date} <- Date.new(year, 1, 1, config.calendar),
{:ok, end_date} <- Date.new(year, last_month, days_in_last_month, config.calendar) do
Date.range(start_date, end_date)
end
end
def quarter(year, quarter, config) do
months_in_quarter = div(months_in_year(year, config), @quarters_in_year)
starting_month = months_in_quarter * (quarter - 1) + 1
starting_day = 1
ending_month = starting_month + months_in_quarter - 1
ending_day = days_in_month(year, ending_month, config)
with {:ok, start_date} <- Date.new(year, starting_month, starting_day, config.calendar),
{:ok, end_date} <- Date.new(year, ending_month, ending_day, config.calendar) do
Date.range(start_date, end_date)
end
end
def month(year, month, config) do
starting_day = 1
ending_day = days_in_month(year, month, config)
with {:ok, start_date} <- Date.new(year, month, starting_day, config.calendar),
{:ok, end_date} <- Date.new(year, month, ending_day, config.calendar) do
Date.range(start_date, end_date)
end
end
def week(year, week, %Config{day_of_week: :first} = config) do
first_day = first_gregorian_day_of_year(year, config)
last_day = last_gregorian_day_of_year(year, config)
start_day = first_day + (week - 1) * @days_in_week
end_day = min(start_day + @days_in_week - 1, last_day)
{year, month, day} = date_from_iso_days(start_day, config)
{:ok, start_date} = Date.new(year, month, day, config.calendar)
{year, month, day} = date_from_iso_days(end_day, config)
{:ok, end_date} = Date.new(year, month, day, config.calendar)
Date.range(start_date, end_date)
end
def week(year, week, config) do
starting_day =
Cldr.Calendar.Base.Week.first_gregorian_day_of_year(year, config) +
Cldr.Calendar.weeks_to_days(week - 1)
ending_day = starting_day + days_in_week() - 1
with {year, month, day} <- date_from_iso_days(starting_day, config),
{:ok, start_date} <- Date.new(year, month, day, config.calendar),
{year, month, day} <- date_from_iso_days(ending_day, config),
{:ok, end_date} <- Date.new(year, month, day, config.calendar) do
Date.range(start_date, end_date)
end
end
def plus(year, month, day, config, :years, years, options) do
new_year = year + years
coerce? = Keyword.get(options, :coerce, false)
{new_month, new_day} = Cldr.Calendar.month_day(new_year, month, day, config.calendar, coerce?)
{new_year, new_month, new_day}
end
def plus(year, month, day, config, :quarters, quarters, options) do
months = quarters * @months_in_quarter
plus(year, month, day, config, :months, months, options)
end
def plus(year, month, day, config, :months, months, options) do
months_in_year = months_in_year(year, config)
{year_increment, new_month} =
case Cldr.Math.div_amod(month + months, months_in_year) do
{year_increment, new_month} when new_month > 0 ->
{year_increment, new_month}
{year_increment, new_month} ->
{year_increment - 1, months_in_year + new_month}
end
new_year = year + year_increment
new_day =
if Keyword.get(options, :coerce, true) do
max_new_day = days_in_month(new_year, new_month, config)
min(day, max_new_day)
else
day
end
{new_year, new_month, new_day}
end
def first_gregorian_day_of_year(year, %Config{month_of_year: 1}) do
ISO.date_to_iso_days(year, 1, 1)
end
def first_gregorian_day_of_year(year, %Config{month_of_year: first_month} = config) do
{beginning_year, _} = Cldr.Calendar.start_end_gregorian_years(year, config)
ISO.date_to_iso_days(beginning_year, first_month, 1)
end
def last_gregorian_day_of_year(year, %Config{month_of_year: first_month} = config) do
{_, ending_year} = Cldr.Calendar.start_end_gregorian_years(year, config)
last_month = Math.amod(first_month - 1, ISO.months_in_year(ending_year))
last_day = ISO.days_in_month(ending_year, last_month)
ISO.date_to_iso_days(ending_year, last_month, last_day)
end
def leap_year?(year, %Config{month_of_year: 1}) do
ISO.leap_year?(year)
end
def leap_year?(year, config) do
days_in_year =
last_gregorian_day_of_year(year, config) - first_gregorian_day_of_year(year, config) + 1
days_in_year == 366
end
def date_to_iso_days(year, month, day, config) do
{days, _day_fraction} = naive_datetime_to_iso_days(year, month, day, 0, 0, 0, {0, 6}, config)
days
end
def date_from_iso_days(iso_day_number, config) do
{year, month, day, _, _, _, _} = naive_datetime_from_iso_days(iso_day_number, config)
{year, month, day}
end
def naive_datetime_from_iso_days(iso_day_number, config) when is_integer(iso_day_number) do
naive_datetime_from_iso_days({iso_day_number, {0, 6}}, config)
end
def naive_datetime_from_iso_days({days, day_fraction}, config) do
{year, month, day} = Calendar.ISO.date_from_iso_days(days)
{year, month, day} = date_from_iso_date(year, month, day, config)
{hour, minute, second, microsecond} = Calendar.ISO.time_from_day_fraction(day_fraction)
{year, month, day, hour, minute, second, microsecond}
end
def naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond, config) do
{year, month, day} = date_to_iso_date(year, month, day, config)
ISO.naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond)
end
@compile {:inline, date_to_iso_date: 4}
def date_to_iso_date(year, month, day, %Config{} = config) do
slide = slide(config)
{iso_year, iso_month} = add_month(year, month, -slide)
{iso_year, iso_month, day}
end
@compile {:inline, date_from_iso_date: 4}
def date_from_iso_date(iso_year, iso_month, day, %Config{} = config) do
slide = slide(config)
{year, month} = add_month(iso_year, iso_month, slide)
{year, month, day}
end
defp add_month(year, month, add) do
calculated_month = month + add
month = Math.amod(calculated_month, ISO.months_in_year(year))
cond do
calculated_month < 1 -> {year - 1, month}
calculated_month > ISO.months_in_year(year) -> {year + 1, month}
true -> {year, month}
end
end
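# Worked examples for add_month/3: add_month(2019, 11, 3) yields
# calculated_month = 14, Math.amod(14, 12) == 2, so {2020, 2};
# add_month(2019, 2, -3) yields -1, Math.amod(-1, 12) == 11, so {2018, 11}.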
@random_year 2000
defp slide(%Config{month_of_year: month} = config) do
{starts, _ends} = Cldr.Calendar.start_end_gregorian_years(@random_year, config)
direction = if starts < @random_year, do: -1, else: +1
month = Math.amod((month - 1) * direction, ISO.months_in_year(starts))
if month == 12, do: 0, else: month * direction * -1
end
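# Worked example, following the arithmetic above: for month_of_year: 4 with
# the calendar year anchored to the starting Gregorian year (starts == 2000),
# direction is +1, month is Math.amod(3, 12) == 3, and slide/1 returns -3.
# date_to_iso_date/4 then adds +3 months, mapping calendar month 1 to April.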
end | lib/cldr/calendar/base/month.ex | 0.718496 | 0.505188 | month.ex | starcoder |
defmodule Vantagex.TechnicalIndicators do
@moduledoc """
Contains functions mapped to the technical indicator endpoints of the Alpha Vantage API.
"""
import Vantagex.Helper
@doc """
Uses Alpha Vantage's SMA function.
Returns the simple moving average (SMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def sma(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:sma, symbol, interval, time_period, series_type, opts)
end
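# Hedged usage sketch (symbol and periods are illustrative):
#
#     Vantagex.TechnicalIndicators.sma("MSFT", :daily, 20, :close)
#     Vantagex.TechnicalIndicators.sma("MSFT", 15, 20, :close, datatype: :json)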
@doc """
Uses Alpha Vantage's EMA function.
Returns the exponential moving average (EMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ema(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:ema, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's WMA function.
Returns the weighted moving average (WMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def wma(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:wma, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's DEMA function.
Returns the double exponential moving average (DEMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def dema(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:dema, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's TEMA function.
Returns the triple exponential moving average (TEMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def tema(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:tema, symbol, interval, time_period, series_type, opts)
end
@doc """
Alias for TEMA. Check `tema/4` for documentation.
"""
def t3(symbol, interval, time_period, series_type, opts \\ []), do: tema(symbol, interval, time_period, series_type, opts)
@doc """
Uses Alpha Vantage's MACD function.
Returns the moving average convergence / divergence (MACD) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastperiod` - positive integers are accepted. Defaults to 12
* `slowperiod` - positive integers are accepted. Defaults to 26
* `signalperiod` - positive integers are accepted. Defaults to 9
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def macd(symbol, interval, series_type, opts \\ []) do
type_3_function(:macd, symbol, interval, series_type, opts)
end
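# Hedged usage sketch showing the optional period overrides documented above:
#
#     Vantagex.TechnicalIndicators.macd("MSFT", :daily, :close,
#       fastperiod: 10, slowperiod: 21, signalperiod: 5)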
@doc """
Uses Alpha Vantage's MACDEXT function.
Returns the moving average convergence / divergence (MACDEXT) values with controllable moving average type.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastperiod` - positive integers are accepted. Defaults to 12
* `slowperiod` - positive integers are accepted. Defaults to 26
* `signalperiod` - positive integers are accepted. Defaults to 9
* `fastmatype` - moving average type for the faster moving average. Defaults to 0. Integers 0-8 accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `slowmatype` - moving average type for the slower moving average. Defaults to 0. Integers 0-8 accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `signalmatype` - moving average type for the signal moving average. Defaults to 0. Integers 0-8 accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def macdext(symbol, interval, series_type, opts \\ []) do
type_3_function(:macdext, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's STOCH function.
Returns the stochastic oscillator (STOCH) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastkperiod` - The time period of the fastk moving average. Positive integers are accepted. Defaults to 5
* `slowkperiod` - The time period of the slowk moving average. Positive integers are accepted. Defaults to 3
* `slowdperiod` - The time period of the slowd moving average. Positive integers are accepted. Defaults to 3
* `slowkmatype` - Moving average type for the slowk moving average. Defaults to 0. Integers 0-8 accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `slowdmatype` - Moving average type for the slowd moving average. Defaults to 0. Integers 0-8 accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def stoch(symbol, interval, opts \\ []) do
type_4_function(:stoch, symbol, interval, opts)
end
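# Hedged usage sketch; matype integers follow the 0-8 mapping noted above:
#
#     Vantagex.TechnicalIndicators.stoch("MSFT", :weekly, slowkmatype: 1)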
@doc """
Uses Alpha Vantage's STOCHF function.
Returns the stochastic fast (STOCHF) values with controllable moving average type.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastkperiod` - The time period of the fastk moving average. Positive integers are accepted. Defaults to 5
* `fastdperiod` - The time period of the fastd moving average. Positive integers are accepted. Defaults to 3
* `fastdmatype` - Moving average type for the fastd moving average. Defaults to 0. Integers 0-8 accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def stochf(symbol, interval, opts \\ []) do
type_4_function(:stochf, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's TRIMA function.
Returns the triangular moving average (TRIMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def trima(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:trima, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's KAMA function.
Returns the Kaufman adaptive moving average (KAMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def kama(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:kama, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's MAMA function.
Returns the MESA adaptive moving average (MAMA) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastlimit` - positive floats are accepted. Defaults to 0.01
* `slowlimit` - positive floats are accepted. Defaults to 0.01
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def mama(symbol, interval, series_type, opts \\ []) do
type_3_function(:mama, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's VWAP function.
Returns the volume weighted average price (VWAP) for intraday time series.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"). Supported values: 1, 5, 15, 30, 60.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def vwap(symbol, interval, opts \\ []) do
type_4_function(:vwap, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's RSI function.
Returns the relative strength index (RSI) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def rsi(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:rsi, symbol, interval, time_period, series_type, opts)
end
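# Hedged usage sketch using the conventional 14-period RSI:
#
#     Vantagex.TechnicalIndicators.rsi("MSFT", :daily, 14, :close)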
@doc """
Uses Alpha Vantage's STOCHRSI function.
Returns the stochastic relative strength index (STOCHRSI) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastkperiod` - The time period of the fastk moving average. Positive integers are accepted.
* `fastdperiod` - The time period of the fastd moving average. Positive integers are accepted.
* `fastdmatype` - Moving average type for the fastd moving average. Integers 0-8 are accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def stochrsi(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:stochrsi, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's WILLR function.
Returns the Williams' %R (WILLR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def willr(symbol, interval, time_period, opts \\ []) do
type_2_function(:willr, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's ADX function.
Returns the average directional movement index (ADX) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def adx(symbol, interval, time_period, opts \\ []) do
type_2_function(:adx, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's ADXR function.
Returns the average directional movement index rating (ADXR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def adxr(symbol, interval, time_period, opts \\ []) do
type_2_function(:adxr, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's APO function.
Returns the absolute price oscillator (APO) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastperiod` - positive integers are accepted. Defaults to 12
* `slowperiod` - positive integers are accepted. Defaults to 26
* `matype` - Moving average type. Defaults to 0. Integers 0-8 are accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def apo(symbol, interval, series_type, opts \\ []) do
type_3_function(:apo, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's PPO function.
Returns the percentage price oscillator (PPO) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastperiod` - positive integers are accepted. Defaults to 12
* `slowperiod` - positive integers are accepted. Defaults to 26
* `matype` - Moving average type. Defaults to 0. Integers 0-8 are accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ppo(symbol, interval, series_type, opts \\ []) do
type_3_function(:ppo, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's MOM function.
Returns the momentum (MOM) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def mom(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:mom, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's BOP function.
Returns the balance of power (BOP) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def bop(symbol, interval, opts \\ []) do
type_4_function(:bop, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's CCI function.
Returns the commodity channel index (CCI) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def cci(symbol, interval, time_period, opts \\ []) do
type_2_function(:cci, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's CMO function.
Returns the Chande momentum oscillator (CMO) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def cmo(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:cmo, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's ROC function.
Returns the rate of change (ROC) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def roc(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:roc, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's ROCR function.
Returns the rate of change ratio (ROCR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def rocr(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:rocr, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's AROON function.
Returns the Aroon (AROON) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def aroon(symbol, interval, time_period, opts \\ []) do
type_2_function(:aroon, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's AROONOSC function.
Returns the Aroon oscillator (AROONOSC) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def aroonosc(symbol, interval, time_period, opts \\ []) do
type_2_function(:aroonosc, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's MFI function.
Returns the money flow index (MFI) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each MFI value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def mfi(symbol, interval, time_period, opts \\ []) do
type_2_function(:mfi, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's TRIX function.
Returns the 1-day rate of change of a triple smooth exponential moving average (TRIX) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer.
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def trix(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:trix, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's ULTOSC function.
Returns the ultimate oscillator (ULTOSC) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `timeperiod1` - The first time period of the indicator. Positive integers are accepted. Defaults to 7
* `timeperiod2` - The second time period of the indicator. Positive integers are accepted. Defaults to 14
* `timeperiod3` - The third time period of the indicator. Positive integers are accepted. Defaults to 28
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ultosc(symbol, interval, opts \\ []) do
type_4_function(:ultosc, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's DX function.
Returns the directional movement index (DX) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def dx(symbol, interval, time_period, opts \\ []) do
type_2_function(:dx, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's MINUS_DI function.
Returns the minus directional indicator (MINUS_DI) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def minus_di(symbol, interval, time_period, opts \\ []) do
type_2_function(:minus_di, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's PLUS_DI function.
Returns the plus directional indicator (PLUS_DI) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def plus_di(symbol, interval, time_period, opts \\ []) do
type_2_function(:plus_di, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's MINUS_DM function.
Returns the minus directional movement (MINUS_DM) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def minus_dm(symbol, interval, time_period, opts \\ []) do
type_2_function(:minus_dm, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's PLUS_DM function.
Returns the plus directional movement (PLUS_DM) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def plus_dm(symbol, interval, time_period, opts \\ []) do
type_2_function(:plus_dm, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's BBANDS function.
Returns the Bollinger bands (BBANDS) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer.
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function.
Allowed options:
* `nbdevup` - The standard deviation multiplier of the upper band. Positive integers are accepted. Defaults to 2.
* `nbdevdn` - The standard deviation multiplier of the lower band. Positive integers are accepted. Defaults to 2.
* `matype` - Moving average type of the time series. Defaults to 0. Integers 0-8 are accepted.
0 = SMA. 1 = EMA. 2 = WMA. 3 = DEMA. 4 = TEMA. 5 = TRIMA. 6 = T3. 7 = KAMA. 8 = MAMA
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def bbands(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:bbands, symbol, interval, time_period, series_type, opts)
end
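# Hedged usage sketch widening both bands to 3 standard deviations:
#
#     Vantagex.TechnicalIndicators.bbands("MSFT", :daily, 20, :close,
#       nbdevup: 3, nbdevdn: 3)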
@doc """
Uses Alpha Vantage's MIDPOINT function.
Returns the midpoint (MIDPOINT) values. `MIDPOINT = (highest value + lowest value) / 2`
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def midpoint(symbol, interval, time_period, series_type, opts \\ []) do
type_1_function(:midpoint, symbol, interval, time_period, series_type, opts)
end
@doc """
Uses Alpha Vantage's MIDPRICE function.
Returns the midpoint price (MIDPRICE) values. `MIDPRICE = (highest high + lowest low) / 2`
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each midprice value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def midprice(symbol, interval, time_period, opts \\ []) do
type_2_function(:midprice, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's SAR function.
Returns the parabolic SAR (SAR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `acceleration` - The acceleration factor. Positive floats are accepted. Defaults to 0.01
* `maximum` - The acceleration factor maximum value. Positive floats are accepted. Defaults to 0.20
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def sar(symbol, interval, opts \\ []) do
type_4_function(:sar, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's TRANGE function.
Returns the true range (TRANGE) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def trange(symbol, interval, opts \\ []) do
type_4_function(:trange, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's ATR function.
Returns the average true range (ATR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def atr(symbol, interval, time_period, opts \\ []) do
type_2_function(:atr, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's NATR function.
Returns the normalized average true range (NATR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `time_period` - Number of data points to calculate each moving average value. Should be a positive integer
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def natr(symbol, interval, time_period, opts \\ []) do
type_2_function(:natr, symbol, interval, time_period, opts)
end
@doc """
Uses Alpha Vantage's AD function.
Returns the Chaikin A/D line (AD) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ad(symbol, interval, opts \\ []) do
type_4_function(:ad, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's ADOSC function.
Returns the Chaikin A/D oscillator (ADOSC) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `fastperiod` - The time period of the fast EMA. Positive integers are accepted. Defaults to 3.
* `slowperiod` - The time period of the slow EMA. Positive integers are accepted. Defaults to 10.
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def adosc(symbol, interval, opts \\ []) do
type_4_function(:adosc, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's OBV function.
Returns the on balance volume (OBV) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def obv(symbol, interval, opts \\ []) do
type_4_function(:obv, symbol, interval, opts)
end
@doc """
Uses Alpha Vantage's HT_TRENDLINE function.
Returns the Hilbert transform, instantaneous trendline (HT_TRENDLINE) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ht_trendline(symbol, interval, series_type, opts \\ []) do
type_3_function(:ht_trendline, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's HT_SINE function.
Returns the Hilbert transform, sine wave (HT_SINE) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ht_sine(symbol, interval, series_type, opts \\ []) do
type_3_function(:ht_sine, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's HT_TRENDMODE function.
Returns the Hilbert transform, trend vs cycle mode (HT_TRENDMODE) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ht_trendmode(symbol, interval, series_type, opts \\ []) do
type_3_function(:ht_trendmode, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's HT_DCPERIOD function.
Returns the Hilbert transform, dominant cycle period (HT_DCPERIOD) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ht_dcperiod(symbol, interval, series_type, opts \\ []) do
type_3_function(:ht_dcperiod, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's HT_DCPHASE function.
Returns the Hilbert transform, dominant cycle phase (HT_DCPHASE) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ht_dcphase(symbol, interval, series_type, opts \\ []) do
type_3_function(:ht_dcphase, symbol, interval, series_type, opts)
end
@doc """
Uses Alpha Vantage's HT_PHASOR function.
Returns the Hilbert transform, phasor components (HT_PHASOR) values.
Args:
* `symbol` - The name of the security of your choice. E.g. `MSFT`
* `interval` - Interval between two consecutive data points in the time series.
You can pass in a number to specify minutes (e.g. 1 == "1min"), or specify the period with the
strings known by Alpha Vantage `(:daily | :weekly | :monthly)`
* `series_type` - The desired price type in the time series. Four types are supported: `:close`, `:open`, `:high`, `:low`.
* `opts` - A list of extra options to pass to the function
Allowed options:
* `datatype` - `:map | :json | :csv` specifies the return format. Defaults to :map
"""
def ht_phasor(symbol, interval, series_type, opts \\ []) do
type_3_function(:ht_phasor, symbol, interval, series_type, opts)
end
defp type_1_function(func, symbol, interval, time_period, series_type, opts) do
params =
%{
symbol: symbol,
interval: parse_interval(interval),
time_period: time_period,
series_type: series_type
}
|> Map.merge(Map.new(opts))
|> clean_params()
resolve_request(func, params)
end
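# For example (assuming Vantagex.Helper.parse_interval/1 renders :daily as
# "daily" and clean_params/1 only drops empty values), sma("MSFT", :daily, 20,
# :close) resolves the request with params like:
#
#     %{symbol: "MSFT", interval: "daily", time_period: 20, series_type: :close}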
defp type_2_function(func, symbol, interval, time_period, opts) do
params = %{
symbol: symbol,
interval: parse_interval(interval),
time_period: time_period
}
|> Map.merge(Map.new(opts))
|> clean_params()
resolve_request(func, params)
end
defp type_3_function(func, symbol, interval, series_type, opts) do
params = %{
symbol: symbol,
interval: parse_interval(interval),
series_type: series_type
}
|> Map.merge(Map.new(opts))
|> clean_params()
resolve_request(func, params)
end
defp type_4_function(func, symbol, interval, opts) do
params = %{
symbol: symbol,
interval: parse_interval(interval)
}
|> Map.merge(Map.new(opts))
|> clean_params()
resolve_request(func, params)
end
end | lib/vantagex/technical_indicators.ex | 0.96217 | 0.989161 | technical_indicators.ex | starcoder |
defmodule RemoteDockers.NodeConfig do
@enforce_keys [:hostname, :port]
defstruct [:hostname, :port, :ssl, :label]
@default_port 2376
@doc """
Build configuration with defaults
default:
```
hostname: "localhost"
port: #{@default_port}
```
## Example:
```elixir
iex> NodeConfig.new()
%NodeConfig{hostname: "localhost", port: 2376}
```
"""
def new() do
new("localhost", @default_port)
end
@doc """
Build configuration with a specific hostname
default:
```
port: #{@default_port}
```
## Example:
```elixir
iex> NodeConfig.new("192.168.99.100")
%NodeConfig{hostname: "192.168.99.100", port: 2376}
```
"""
def new(hostname) do
new(hostname, @default_port)
end
@doc """
Build configuration with specific hostname and port
## Example:
```elixir
iex> NodeConfig.new("192.168.99.100", 2345)
%NodeConfig{hostname: "192.168.99.100", port: 2345}
```
"""
def new(hostname, port) do
%RemoteDockers.NodeConfig{
hostname: hostname,
port: port
}
end
@doc """
Build configuration with SSL
default:
```
port: #{@default_port}
```
## Example:
```elixir
iex> NodeConfig.new("192.168.99.100", "cert.pem", "key.pem")
%NodeConfig{
hostname: "192.168.99.100",
port: 2376,
ssl: [
certfile: "cert.pem",
keyfile: "key.pem"
]
}
```
"""
def new(hostname, nil, nil), do: new(hostname)
def new(hostname, certfile, keyfile) do
new(hostname, @default_port, certfile, keyfile)
end
@doc """
Build configuration with hostname, port and SSL
## Example:
```elixir
iex> NodeConfig.new("192.168.99.100", 2345, "cert.pem", "key.pem")
%NodeConfig{
hostname: "192.168.99.100",
port: 2345,
ssl: [
certfile: "cert.pem",
keyfile: "key.pem"
]
}
```
"""
def new(hostname, port, nil, nil), do: new(hostname, port)
def new(hostname, port, certfile, keyfile) do
%RemoteDockers.NodeConfig{
hostname: hostname,
port: port,
ssl: [
certfile: certfile,
keyfile: keyfile
]
}
end
@doc """
Build configuration with hostname, port and SSL with Certificate Authority
## Example:
```elixir
iex> NodeConfig.new("192.168.99.100", 2345, "ca.pem", "cert.pem", "key.pem")
%NodeConfig{
hostname: "192.168.99.100",
port: 2345,
ssl: [
cacertfile: "ca.pem",
certfile: "cert.pem",
keyfile: "key.pem"
]
}
```
"""
def new(hostname, port, nil, nil, nil), do: new(hostname, port)
def new(hostname, port, nil, certfile, keyfile), do: new(hostname, port, certfile, keyfile)
def new(hostname, port, cacertfile, certfile, keyfile) do
%RemoteDockers.NodeConfig{
hostname: hostname,
port: port,
ssl: [
cacertfile: cacertfile,
certfile: certfile,
keyfile: keyfile
]
}
end
@doc """
Set label for this configuration
## Example:
```elixir
iex> NodeConfig.new() |> NodeConfig.set_label("My Local Node")
%NodeConfig{
hostname: "localhost",
port: 2376,
label: "My Local Node"
}
```
"""
def set_label(%RemoteDockers.NodeConfig{} = node_config, label) do
Map.put(node_config, :label, label)
end
@doc """
Get HTTPoison default options with ssl if enabled
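
  For example (derived from the constructors above):

  ```elixir
  iex> NodeConfig.new("192.168.99.100", 2345, "cert.pem", "key.pem") |> NodeConfig.get_options()
  [ssl: [certfile: "cert.pem", keyfile: "key.pem"]]
  ```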
"""
def get_options(%RemoteDockers.NodeConfig{ssl: nil} = _node_config), do: []
def get_options(node_config) do
[
ssl: node_config.ssl
]
end
end

# ---- end of lib/node_config.ex ----

defmodule Cloudinary.Transformation.Effect.Tint do
@moduledoc false
import Cloudinary.Transformation.Color
defguardp is_truthy(as_boolean) when as_boolean not in [false, nil]
defguardp is_amount(amount) when amount <= 100 and amount >= 0
defguardp is_position(position) when position <= 100 and position >= 0
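
  # Illustrative outputs derived from the clauses below (assuming the given
  # color strings are named colors rather than values matched by is_rgb/1):
  #
  #   to_url_string(%{amount: 40, color: "red"})     #=> "tint:40:red"
  #   to_url_string(%{equalize: true, color: "red"}) #=> "tint:equalize:60:red"
  #   to_url_string(%{color: [{"red", 50}, {"blue", 90}]})
  #   #=> "tint:60:red:50p:blue:90p"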
@spec to_url_string(%{
optional(:equalize) => as_boolean(any),
optional(:amount) => 0..100 | float,
optional(:color) =>
Cloudinary.Transformation.Color.t()
| {Cloudinary.Transformation.Color.t(), 0..100 | float}
| [Cloudinary.Transformation.Color.t()]
| [{Cloudinary.Transformation.Color.t(), 0..100 | float}]
}) :: String.t()
def to_url_string(%{equalize: equalize, amount: amount, color: {color, position}})
when is_truthy(equalize) and is_amount(amount) and is_rgb(color) and is_position(position) do
"tint:equalize:#{amount}:rgb:#{color}:#{position}p"
end
def to_url_string(%{equalize: equalize, amount: amount, color: {color, position}})
when is_truthy(equalize) and is_amount(amount) and
is_binary(color) and is_position(position) do
"tint:equalize:#{amount}:#{color}:#{position}p"
end
def to_url_string(%{equalize: equalize, amount: amount, color: color})
when is_truthy(equalize) and is_amount(amount) and is_rgb(color) do
"tint:equalize:#{amount}:rgb:#{color}"
end
def to_url_string(%{equalize: equalize, amount: amount, color: color})
when is_truthy(equalize) and is_amount(amount) and is_binary(color) do
"tint:equalize:#{amount}:#{color}"
end
def to_url_string(%{equalize: equalize, amount: amount, color: colors})
when is_truthy(equalize) and is_amount(amount) and is_list(colors) do
"tint:equalize:#{amount}:#{extract_color_list(colors)}"
end
def to_url_string(%{amount: amount, color: {color, position}})
when is_amount(amount) and is_rgb(color) and is_position(position) do
"tint:#{amount}:rgb:#{color}:#{position}p"
end
def to_url_string(%{amount: amount, color: {color, position}})
when is_amount(amount) and is_binary(color) and is_position(position) do
"tint:#{amount}:#{color}:#{position}p"
end
def to_url_string(%{amount: amount, color: color}) when is_amount(amount) and is_rgb(color) do
"tint:#{amount}:rgb:#{color}"
end
def to_url_string(%{amount: amount, color: color})
when is_amount(amount) and is_binary(color) do
"tint:#{amount}:#{color}"
end
def to_url_string(%{amount: amount, color: colors})
when is_amount(amount) and is_list(colors) do
"tint:#{amount}:#{extract_color_list(colors)}"
end
def to_url_string(%{equalize: equalize, amount: amount})
when is_amount(amount) and is_truthy(equalize) do
"tint:equalize:#{amount}"
end
def to_url_string(%{equalize: equalize, color: {color, position}})
when is_truthy(equalize) and is_rgb(color) and is_position(position) do
"tint:equalize:60:rgb:#{color}:#{position}p"
end
def to_url_string(%{equalize: equalize, color: {color, position}})
when is_truthy(equalize) and is_binary(color) and is_position(position) do
"tint:equalize:60:#{color}:#{position}p"
end
def to_url_string(%{equalize: equalize, color: color})
when is_truthy(equalize) and is_rgb(color) do
"tint:equalize:60:rgb:#{color}"
end
def to_url_string(%{equalize: equalize, color: color})
when is_truthy(equalize) and is_binary(color) do
"tint:equalize:60:#{color}"
end
def to_url_string(%{equalize: equalize, color: colors})
when is_truthy(equalize) and is_list(colors) do
"tint:equalize:60:#{extract_color_list(colors)}"
end
def to_url_string(%{amount: amount}) when is_amount(amount), do: "tint:#{amount}"
def to_url_string(%{color: {color, position}}) when is_rgb(color) and is_position(position) do
"tint:60:rgb:#{color}:#{position}p"
end
def to_url_string(%{color: {color, position}})
when is_binary(color) and is_position(position) do
"tint:60:#{color}:#{position}p"
end
def to_url_string(%{color: color}) when is_rgb(color), do: "tint:60:rgb:#{color}"
def to_url_string(%{color: color}) when is_binary(color), do: "tint:60:#{color}"
def to_url_string(%{color: colors}) when is_list(colors) do
"tint:60:#{extract_color_list(colors)}"
end
def to_url_string(%{equalize: equalize}) when is_truthy(equalize), do: "tint:equalize"
@spec extract_color_list(
[Cloudinary.Transformation.Color.t()]
| [{Cloudinary.Transformation.Color.t(), 0..100 | float}]
) :: String.t()
defp extract_color_list(colors) when is_list(colors) do
cond do
Enum.all?(colors, &is_tuple/1) -> Enum.map_join(colors, ":", &position_to_string/1)
true -> Enum.map_join(colors, ":", &color_to_string/1)
end
end
@spec position_to_string({Cloudinary.Transformation.Color.t(), 0..100 | float}) :: String.t()
defp position_to_string({color, position}) when is_rgb(color) and is_position(position) do
"rgb:#{color}:#{position}p"
end
defp position_to_string({color, position}) when is_binary(color) and is_position(position) do
"#{color}:#{position}p"
end
@spec color_to_string(Cloudinary.Transformation.Color.t()) :: String.t()
defp color_to_string(color) when is_rgb(color), do: "rgb:#{color}"
defp color_to_string(color) when is_binary(color), do: color
end

# ---- end of lib/cloudinary/transformation/effect/tint.ex ----

defmodule Cldr.UnknownLocaleError do
@moduledoc """
Exception raised when an attempt is made to use a locale not configured
in `Cldr`. `Cldr.known_locale_names/1` returns the locale names known to `Cldr`.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownNumberSystemError do
@moduledoc """
Exception raised when an attempt is made to use a number system that is not known
in `Cldr`. `Cldr.Number.number_system_names/0` returns the number system names known to `Cldr`.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.NoMatchingLocale do
@moduledoc """
Exception raised when no configured locale matches the provided "Accept-Language" header
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownNumberSystemTypeError do
@moduledoc """
Exception raised when an attempt is made to use a number system type that is not known
in `Cldr`. `Cldr.Number.number_system_types/0` returns the number system types known to `Cldr`.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownFormatError do
@moduledoc """
Exception raised when an attempt is made to use a locale that is not configured
in `Cldr`. `Cldr.known_locale_names/1` returns the locale names known to `Cldr`.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownUnitError do
@moduledoc """
Exception raised when an attempt is made to use a unit that is not known.
in `Cldr`.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownCalendarError do
@moduledoc """
Exception raised when an attempt is made to use a calendar that is not known.
in `Cldr`.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.FormatError do
@moduledoc """
Exception raised when there is an error in the formatting of a number/list/...
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.FormatCompileError do
@moduledoc """
Exception raised when there is an error in the compiling of a number format.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownCurrencyError do
@moduledoc """
Exception raised when there is an invalid currency code.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownTerritoryError do
@moduledoc """
Exception raised when there is an invalid territory code.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownOTPAppError do
@moduledoc """
Exception raised when the configured OTP app
is not known
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownPluralRules do
@moduledoc """
Exception raised when there are no plural rules for a locale or language.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.InvalidDateFormatType do
@moduledoc """
Exception raised when there is an invalid date format type.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.InvalidTimeFormatType do
@moduledoc """
Exception raised when there is an invalid time format type.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.InvalidDateTimeFormatType do
@moduledoc """
Exception raised when there is an invalid datetime format type.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.Rbnf.NotAvailable do
@moduledoc """
Exception raised when there is no RBNF for a locale.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.AcceptLanguageError do
@moduledoc """
Exception raised when there no valid language tag
in an `Accept-Language` header.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.LanguageTag.ParseError do
@moduledoc """
Exception raised when a language tag cannot
be parsed (there is unparsed content).
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.NoDefaultBackendError do
@moduledoc """
Exception raised when there is no
default backend configured
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownMeasurementSystemError do
@moduledoc """
Exception raised when the measurement
system is invalid.
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.UnknownBackendError do
@moduledoc """
Exception raised when the backend
module is unknown or not a backend module
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end
defmodule Cldr.AmbiguousTimezoneError do
@moduledoc """
Exception raised when the there are more
than one timezones for a locale
"""
defexception [:message]
def exception(message) do
%__MODULE__{message: message}
end
end

# ---- end of lib/cldr/exception.ex ----

defmodule CCSP.Chapter4.Graph do
alias __MODULE__, as: T
alias CCSP.Chapter4.Edge
@moduledoc """
Corresponds to CCSP in Python, Chapter 4, titled "Graph problems"
This module may be a good candidate to use a gen server.
Also good candidate for converting lists to maps for better lookup times
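
  ## Example

  A minimal usage sketch (`Edge.new/2` builds an edge with `u`/`v` indices, as
  used by `add_edge/2` below):

      graph =
        Graph.new(["Boston", "New York"])
        |> Graph.add_edge_by_vertices("Boston", "New York")

      Graph.neighbors_for_vertex(graph, "Boston")
      #=> ["New York"]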
"""
defstruct [:vertices, :edges]
@type a :: any
@type t :: %T{
vertices: list(a),
edges: list(list(Edge.t()))
}
@spec new(list(a)) :: t
def new(vertices) do
edges =
Enum.map(vertices, fn _ ->
[]
end)
%T{vertices: vertices, edges: edges}
end
@spec vertex_count(t) :: non_neg_integer
def vertex_count(graph) do
length(graph.vertices)
end
@spec edge_count(t) :: non_neg_integer
def edge_count(graph) do
Enum.map(graph.edges, fn edge ->
length(edge)
end)
|> Enum.sum()
end
@spec add_vertex(t, a) :: t
def add_vertex(graph, vertex) do
vertices = [vertex | graph.vertices]
edges = [[] | graph.edges]
%T{vertices: vertices, edges: edges}
end
@spec add_edge(t, a) :: t
def add_edge(graph, edge) do
edges =
graph.edges
|> List.update_at(edge.u, &[edge | &1])
|> List.update_at(edge.v, &[Edge.reversed(edge) | &1])
%T{vertices: graph.vertices, edges: edges}
end
@spec add_edge_by_indicies(t, non_neg_integer, non_neg_integer) :: t
def add_edge_by_indicies(graph, u, v) do
add_edge(graph, Edge.new(u, v))
end
@spec add_edge_by_vertices(t, a, a) :: t
def add_edge_by_vertices(graph, first, second) do
u = Enum.find_index(graph.vertices, &(&1 == first))
v = Enum.find_index(graph.vertices, &(&1 == second))
add_edge_by_indicies(graph, u, v)
end
@spec vertex_at(t, non_neg_integer) :: a
def vertex_at(graph, index) do
Enum.at(graph.vertices, index)
end
@spec index_of(t, a) :: non_neg_integer
def index_of(graph, vertex) do
Enum.find_index(graph.vertices, fn x -> x == vertex end)
end
@spec neighbors_for_index(t, non_neg_integer) :: list(a)
def neighbors_for_index(graph, index) do
graph.edges
|> Enum.at(index)
|> Enum.map(&vertex_at(graph, &1.v))
end
@spec neighbors_for_vertex(t, a) :: list(a)
def neighbors_for_vertex(graph, vertex) do
neighbors_for_index(graph, Enum.find_index(graph.vertices, &(&1 == vertex)))
end
@spec edges_for_index(t, non_neg_integer) :: list(Edge.t())
def edges_for_index(graph, index) do
Enum.at(graph.edges, index)
end
@spec edges_for_vertex(t, a) :: list(Edge.t())
def edges_for_vertex(graph, vertex) do
edges_for_index(graph, Enum.find_index(graph.vertices, &(&1 == vertex)))
end
end
defimpl Inspect, for: CCSP.Chapter4.Graph do
alias CCSP.Chapter4.Graph
def inspect(graph, _opts) do
Enum.reduce(0..(Graph.vertex_count(graph) - 1), "", fn i, acc ->
vertex = Graph.vertex_at(graph, i)
vertex_neighbors = Graph.neighbors_for_index(graph, i)
acc <> "#{vertex} -> #{Enum.join(vertex_neighbors, ", ")}\n"
end)
end
end

# ---- end of lib/ccsp/chapter4/graph.ex ----

defmodule URI do
@moduledoc """
Utilities for working with and creating URIs.
"""
defrecord Info, [scheme: nil, path: nil, query: nil,
fragment: nil, authority: nil,
userinfo: nil, host: nil, port: nil]
import Bitwise
@ports [
{ "ftp", 21 },
{ "http", 80 },
{ "https", 443 },
{ "ldap", 389 },
{ "sftp", 22 },
{ "tftp", 69 },
]
Enum.each @ports, fn { scheme, port } ->
def normalize_scheme(unquote(scheme)), do: unquote(scheme)
def default_port(unquote(scheme)), do: unquote(port)
end
@doc """
Normalizes the scheme according to the spec by downcasing it.
"""
def normalize_scheme(nil), do: nil
def normalize_scheme(scheme), do: String.downcase(scheme)
@doc """
Returns the default port for a given scheme.
If the scheme is unknown to URI, returns `nil`.
Any scheme may be registered via `default_port/2`.
"""
def default_port(scheme) when is_binary(scheme) do
{ :ok, dict } = :application.get_env(:elixir, :uri)
Dict.get(dict, scheme)
end
@doc """
Registers a scheme with a default port.
"""
def default_port(scheme, port) when is_binary(scheme) and port > 0 do
{ :ok, dict } = :application.get_env(:elixir, :uri)
:application.set_env(:elixir, :uri, Dict.put(dict, scheme, port))
end
@doc """
Takes an enumerable (containing a sequence of two-item tuples)
and returns a string of the form "k=v&k2=v2..." where keys and values are
URL encoded as per `encode/1`. Keys and values can be any term
that implements the `String.Chars` protocol (i.e. can be converted
to a binary).
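
  ## Examples

      URI.encode_query([{:foo, 1}, {:bar, "hello world"}])
      #=> "foo=1&bar=hello+world"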
"""
def encode_query(l), do: Enum.map_join(l, "&", pair(&1))
@doc """
  Given a query string of the form "key1=value1&key2=value2...", produces a
  dict (by default a `HashDict`) with one entry for each key-value pair. Each key and value will be a
binary. It also does percent-unescaping of both keys and values.
Use `query_decoder/1` if you want to iterate over each value manually.
"""
def decode_query(q, dict // HashDict.new) when is_binary(q) do
Enum.reduce query_decoder(q), dict, fn({ k, v }, acc) -> Dict.put(acc, k, v) end
end
@doc """
Returns an iterator function over the query string that decodes
the query string in steps.
"""
def query_decoder(q) when is_binary(q) do
fn(acc, fun) ->
do_decoder(q, acc, fun)
end
end
defp do_decoder("", acc, _fun) do
acc
end
defp do_decoder(q, acc, fun) do
    {first, next} =
      case :binary.split(q, "&") do
        [first, rest] -> {first, rest}
        [first] -> {first, ""}
      end
current =
case :binary.split(first, "=") do
[ key, value ] -> { decode(key), decode(value) }
[ key ] -> { decode(key), nil }
end
do_decoder(next, fun.(current, acc), fun)
end
defp pair({k, v}) do
encode(to_string(k)) <> "=" <> encode(to_string(v))
end
@doc """
Percent (URL) encodes a URI.
"""
def encode(s), do: bc(<<c>> inbits s, do: <<percent(c) :: binary>>)
defp percent(32), do: <<?+>>
defp percent(?-), do: <<?->>
defp percent(?_), do: <<?_>>
defp percent(?.), do: <<?.>>
defp percent(c)
when c >= ?0 and c <= ?9
when c >= ?a and c <= ?z
when c >= ?A and c <= ?Z do
<<c>>
end
defp percent(c), do: "%" <> hex(bsr(c, 4)) <> hex(band(c, 15))
defp hex(n) when n <= 9, do: <<n + ?0>>
defp hex(n), do: <<n + ?A - 10>>
@doc """
Unpercent (URL) decodes a URI.
"""
def decode(<<?%, hex1, hex2, tail :: binary >>) do
<< bsl(hex2dec(hex1), 4) + hex2dec(hex2) >> <> decode(tail)
end
def decode(<<head, tail :: binary >>) do
<<check_plus(head)>> <> decode(tail)
end
def decode(<<>>), do: <<>>
defp hex2dec(n) when n in ?A..?F, do: n - ?A + 10
defp hex2dec(n) when n in ?0..?9, do: n - ?0
defp check_plus(?+), do: 32
defp check_plus(c), do: c
@doc """
Parses a URI into components.
URIs have portions that are handled specially for the
particular scheme of the URI. For example, http and https
have different default ports. Sometimes the parsing
of portions themselves are different. This parser
is extensible via behavior modules. If you have a
module named `URI.MYSCHEME` with a function called
`parse` that takes a single argument, the generically
parsed URI, that function will be called when this
parse function is passed a URI of that scheme. This
allows you to build on top of what the URI library
currently offers. You also need to define `default_port`
which takes no arguments and returns the default port
for that particular scheme. Take a look at `URI.HTTPS` for an
example of one of these extension modules.
"""
def parse(s) when is_binary(s) do
# From http://tools.ietf.org/html/rfc3986#appendix-B
regex = %r/^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?/
parts = nillify(Regex.run(regex, s))
destructure [_, _, scheme, _, authority, path, _, query, _, fragment], parts
{ userinfo, host, port } = split_authority(authority)
if authority do
authority = ""
if userinfo, do: authority = authority <> userinfo <> "@"
if host, do: authority = authority <> host
if port, do: authority = authority <> ":" <> integer_to_binary(port)
end
scheme = normalize_scheme(scheme)
if nil?(port) and not nil?(scheme) do
port = default_port(scheme)
end
URI.Info[
scheme: scheme, path: path, query: query,
fragment: fragment, authority: authority,
userinfo: userinfo, host: host, port: port
]
end
# Split an authority into its userinfo, host and port parts.
defp split_authority(s) do
s = s || ""
components = Regex.run %r/(^(.*)@)?(\[[a-zA-Z0-9:.]*\]|[^:]*)(:(\d*))?/, s
destructure [_, _, userinfo, host, _, port], nillify(components)
port = if port, do: binary_to_integer(port)
host = if host, do: host |> String.lstrip(?[) |> String.rstrip(?])
{ userinfo, host, port }
end
# Regex.run returns empty strings sometimes. We want
# to replace those with nil for consistency.
defp nillify(l) do
lc s inlist l do
if size(s) > 0, do: s, else: nil
end
end
end
defimpl String.Chars, for: URI.Info do
def to_string(URI.Info[] = uri) do
scheme = uri.scheme
if scheme && (port = URI.default_port(scheme)) do
if uri.port == port, do: uri = uri.port(nil)
end
result = ""
if uri.scheme, do: result = result <> uri.scheme <> "://"
if uri.userinfo, do: result = result <> uri.userinfo <> "@"
if uri.host, do: result = result <> uri.host
if uri.port, do: result = result <> ":" <> integer_to_binary(uri.port)
if uri.path, do: result = result <> uri.path
if uri.query, do: result = result <> "?" <> uri.query
if uri.fragment, do: result = result <> "#" <> uri.fragment
result
end
end

# ---- end of lib/elixir/lib/uri.ex ----

defmodule ExAlgo.Tree.BinarySearchTree do
@moduledoc """
Implements a binary search tree.
"""
@type key_type :: any()
@type value_type :: any()
@type key_fn :: (value_type() -> key_type())
@type leaf :: nil
@type t() :: %__MODULE__{
data: value_type(),
left: t() | leaf(),
right: t() | leaf()
}
@identity &Function.identity/1
  # A binary tree node contains data, a left child, and a right child.
  defstruct [:data, left: nil, right: nil]
@doc """
Create a new tree with data being the root.
## Example
iex> BST.new(0)
%BST{data: 0, left: nil, right: nil}
iex> BST.new(%{id: 10, name: "Mafinar"})
%BST{data: %{id: 10, name: "Mafinar"}, left: nil, right: nil}
"""
def new(data), do: %__MODULE__{data: data}
@doc """
Creates a binary search tree from a list.
## Example
iex> BST.from [5, 4, 7]
%BST{
data: 5,
left: %BST{
data: 4,
left: nil,
right: nil
},
right: %BST{
data: 7,
left: nil,
right: nil
}
}
"""
@spec from([value_type()]) :: t()
def from([x | xs]) do
xs
|> Enum.reduce(
__MODULE__.new(x),
fn item, tree -> tree |> insert(item) end
)
end
@doc """
Insert a new item in the correct position in the tree.
## Example
iex> BST.new(10)
...> |> BST.insert(11)
...> |> BST.insert(-34)
...> |> BST.insert(14)
...> |> BST.insert(0)
...> |> BST.insert(-75)
%BST{
data: 10,
left: %BST{
data: -34,
left: %BST{
data: -75,
left: nil,
right: nil
},
right: %BST{
data: 0,
left: nil,
right: nil
},
},
right: %BST{
data: 11,
left: nil,
right: %BST{
data: 14,
left: nil,
right: nil
},
}
}
"""
@spec insert(t(), value_type()) :: t()
def insert(%__MODULE__{data: data, left: left} = tree, item) when item < data do
case left do
nil -> %{tree | left: %__MODULE__{data: item}}
left -> %{tree | left: insert(left, item)}
end
end
def insert(%__MODULE__{right: right} = tree, item) do
case right do
nil -> %{tree | right: %__MODULE__{data: item}}
right -> %{tree | right: insert(right, item)}
end
end
@doc """
Find an item on the tree.
## Example
iex> tree = BST.from [10, 11, -34, 14, 0, -75]
iex> BST.find(tree, 11)
11
iex> BST.find(tree, 9)
nil
iex> languages = [
...> %{id: 1, language: "Elixir"},
...> %{id: 2, language: "Python"},
...> %{id: 3, language: "C++"}
...> ]
iex> tree = BST.from(languages)
iex> BST.find(tree, 2, & &1.id)
%{id: 2, language: "Python"}
iex> BST.find(tree, 6, & &1.id)
nil
"""
@spec find(t() | nil, key_type(), key_fn()) :: nil | value_type()
def find(_, _, key_fn \\ @identity)
def find(nil, _, _), do: nil
def find(%__MODULE__{data: data, left: left, right: right}, key, key_fn) do
case key_fn.(data) do
^key -> data
current_key when current_key < key -> find(right, key, key_fn)
_ -> find(left, key, key_fn)
end
end
end

# ---- end of lib/ex_algo/tree/binary_search_tree.ex ----

defmodule Alchemy.Webhook do
@moduledoc """
"""
alias Alchemy.Discord.Webhooks
alias Alchemy.{Embed, User}
import Alchemy.Discord.RateManager, only: [send_req: 2]
@type snowflake :: String.t()
@type t :: %__MODULE__{
id: snowflake,
guild_id: snowflake | nil,
channel_id: snowflake,
user: User.t() | nil,
name: String.t() | nil,
avatar: String.t() | nil,
token: String.t()
}
defstruct [:id, :guild_id, :channel_id, :user, :name, :avatar, :token]
@doc """
Creates a new webhook in a channel.
  The `name` parameter is mandatory, and specifies the name of the webhook.
## Options
- `avatar`
A link to a 128x128 image to act as the avatar of the webhook.
## Examples
```elixir
{:ok, hook} = Webhook.create("66666", "The Devil")
```
"""
@spec create(snowflake, String.t(), avatar: String.t()) ::
{:ok, __MODULE__.t()}
| {:error, term}
def create(channel_id, name, options \\ []) do
{Webhooks, :create_webhook, [channel_id, name, options]}
|> send_req("/channels/webhooks")
end
@doc """
Returns a list of all webhooks in a channel.
## Examples
```elixir
{:ok, [%Webhook{} | _]} = Webhook.in_channel("6666")
```
"""
@spec in_channel(snowflake) :: {:ok, [__MODULE__.t()]} | {:error, term}
def in_channel(channel_id) do
{Webhooks, :channel_webhooks, [channel_id]}
|> send_req("/channels/webhooks")
end
@doc """
Returns a list of all webhooks in a guild.
## Examples
```elixir
{:ok, [%Webhook{} | _]} = Webhook.in_guild("99999")
```
"""
  @spec in_guild(snowflake) :: {:ok, [__MODULE__.t()]} | {:error, term}
def in_guild(guild_id) do
{Webhooks, :guild_webhooks, [guild_id]}
|> send_req("/guilds/webhooks")
end
@doc """
Modifies the settings of a webhook.
Note that the user field of the webhook will be missing.
## Options
- `name`
The name of the webhook.
- `avatar`
A link to a 128x128 icon image.
## Examples
```elixir
{:ok, hook} = Webhook.create("6666", "Captian Hook")
# Let's fix that typo:
Webhook.edit(hook, name: "Captain Hook")
```
"""
@spec edit(__MODULE__.t(), name: String.t(), avatar: String.t()) ::
{:ok, __MODULE__.t()}
| {:error, term}
def edit(%__MODULE__{id: id, token: token}, options) do
{Webhooks, :modify_webhook, [id, token, options]}
|> send_req("/webhooks")
end
@doc """
Deletes a webhook.
All you need for this is the webhook itself.
## Examples
```elixir
{:ok, wh} = Webhook.create("666", "Captain Hook")
Webhook.delete(wh)
```
"""
@spec delete(__MODULE__.t()) :: {:ok, __MODULE__.t()} | {:error, term}
def delete(%__MODULE__{id: id, token: token}) do
{Webhooks, :delete_webhook, [id, token]}
|> send_req("/webhooks")
end
@doc """
Sends a message to a webhook.
`type` must be one of `:embed, :content`; `:embed` requiring an `Embed.t`
struct, and `:content` requiring a string.
## Options
- `avatar_url`
A link to an image to replace the one the hook has, for this message.
- `username`
    The username to override the hook's username with, for this message.
- `tts`
When set to true, will make the message TTS
## Examples
```elixir
{:ok, hook} = Webhook.create("66", "Captain Hook")
Webhook.send(hook, {content: "ARRRRRGH!"})
```
For a more elaborate example:
```elixir
user = Cache.user()
embed = %Embed{}
|> description("I'm commandeering this vessel!!!")
|> color(0x3a83b8)
Webhook.send(hook, {:embed, embed},
avatar_url: User.avatar_url(user),
username: user.username)
```
"""
@spec send(__MODULE__.t(), {:embed, Embed.t()} | {:content, String.t()},
avatar_url: String.t(),
username: String.t(),
          tts: boolean()
) ::
{:ok, nil} | {:error, term}
def send(%__MODULE__{id: id, token: token}, {type, content}, options \\ []) do
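    # Normalize an embed into the `:embeds` list shape used in the request
    # payload, building the raw map from the `Embed` struct; other content
    # passes through untouched.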
{type, content} =
case {type, content} do
{:embed, em} ->
{:embeds, [Embed.build(em)]}
x ->
x
end
options = Keyword.put(options, type, content)
{Webhooks, :execute_webhook, [id, token, options]}
|> send_req("/webhooks")
end
end

# ---- end of lib/Structs/webhook.ex ----

defmodule Day15.ChitonMap do
defstruct map: %{}, dimensions: {0, 0}, scale: 1
@doc """
Parse puzzle input to generate a map from locations to Chiton risk level.
"""
def parse(input) do
{map, {y_max, x_max}} =
for {line, y} <- String.split(input, "\n", trim: true) |> Enum.with_index(),
{num, x} <- String.to_charlist(line) |> Enum.with_index(),
reduce: {%{}, {0, 0}} do
{map, _} -> {Map.put(map, {y, x}, num - ?0), {y, x}}
end
%__MODULE__{map: map, dimensions: {y_max + 1, x_max + 1}}
end
@doc """
  Allows lookups out of bounds to the right and bottom, using the expansion
rules of part 2 of the puzzle.
"""
def risk_at(%{map: map, dimensions: {h, w}}, {y, x}) do
raw_value = map[{rem(y, h), rem(x, w)}] + div(y, h) + div(x, w)
rem(raw_value - 1, 9) + 1
end
def scale(%__MODULE__{} = map, scale) do
%{map | scale: scale}
end
def neighbors(%{scale: scale, dimensions: {h, w}}, {y, x}) do
[{y - 1, x}, {y + 1, x}, {y, x - 1}, {y, x + 1}]
|> Enum.filter(fn {y, x} -> x >= 0 and x < w * scale and y >= 0 and y < h * scale end)
end
end
defmodule Day15.Exploration do
alias Day15.ChitonMap
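
  # Dijkstra-style search state: `exploration` maps frontier points to the
  # lowest accumulated risk found so far, and `visited` holds points whose
  # risk is final. The starting point {0, 0} contributes no risk.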
defstruct exploration: %{{0, 0} => 0}, visited: MapSet.new()
@doc """
Take an exploration step.
"""
def explore(%__MODULE__{exploration: exploration, visited: visited} = state, map) do
current = choose_next_point(exploration)
# Calculate risk to reach neighbors going through the point we're visiting
neighbor_risks =
ChitonMap.neighbors(map, current)
|> Enum.reject(fn pos -> MapSet.member?(visited, pos) end)
|> Enum.map(fn pos ->
{pos, exploration[current] + ChitonMap.risk_at(map, pos)}
end)
mark_as_visited(state, current)
|> update_exploration(neighbor_risks)
end
def explore(%ChitonMap{} = map) do
explore(%__MODULE__{}, map)
end
defp choose_next_point(exploration) do
# Next point to visit is the unvisited point with the least accumulated risk
{next_point, _} = Enum.min_by(exploration, fn {_, risk} -> risk end)
next_point
end
defp mark_as_visited(%{exploration: exploration, visited: visited} = state, point) do
%{state | exploration: Map.delete(exploration, point), visited: MapSet.put(visited, point)}
end
defp update_exploration(%{exploration: exploration} = state, new_risks) do
%{
state
| exploration:
Enum.reduce(new_risks, exploration, fn {point, risk}, exp ->
Map.update(exp, point, risk, fn
old_risk when risk >= old_risk -> old_risk
_ -> risk
end)
end)
}
end
@doc """
  Explore until we reach the target, returning the minimum accumulated risk.
"""
def find_min_risk(map, target) do
Stream.iterate(%__MODULE__{}, &explore(&1, map))
|> Enum.find_value(fn
%{exploration: %{^target => risk}} -> risk
_ -> nil
end)
end
end
defmodule Day15 do
alias Day15.{ChitonMap, Exploration}
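
  # Usage sketch (assuming the puzzle input lives in "input.txt"):
  #
  #   input = File.read!("input.txt")
  #   Day15.solve_part1(input)
  #   Day15.solve_part2(input)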
def solve_part1(input) do
%{dimensions: {y_max, x_max}} = map = ChitonMap.parse(input)
target = {y_max - 1, x_max - 1}
Exploration.find_min_risk(map, target)
end
def solve_part2(input) do
%{dimensions: {y_max, x_max}} = map = ChitonMap.parse(input) |> ChitonMap.scale(5)
target = {5 * y_max - 1, 5 * x_max - 1}
Exploration.find_min_risk(map, target)
end
end

# ---- end of day15/solver.ex ----

defmodule Flowr.Automation do
@moduledoc """
The Automation context.
"""
import Ecto.Query, warn: false
alias Flowr.Repo
alias Flowr.Automation.Flow
alias Flowr.Automation.Action
alias Flowr.Automation.JSONTemplate
@doc """
Returns the list of flows.
## Examples
iex> list_flows()
[%Flow{}, ...]
"""
def list_flows do
Repo.all(Flow)
end
def list_flows(%Flowr.Accounts.Customer{} = customer) do
customer
|> Ecto.assoc(:flows)
|> Repo.all()
end
def list_flows(%Flowr.Platform.Trigger{} = trigger) do
trigger
|> Ecto.assoc(:flows)
|> Repo.all()
end
@doc """
Gets a single flow.
Raises `Ecto.NoResultsError` if the Flow does not exist.
## Examples
iex> get_flow!(123)
%Flow{}
iex> get_flow!(456)
** (Ecto.NoResultsError)
"""
def get_flow!(id), do: Repo.get!(Flow, id)
@doc """
Creates a flow.
## Examples
      iex> create_flow(customer, %{field: value})
      {:ok, %Flow{}}
      iex> create_flow(customer, %{field: bad_value})
      {:error, %Ecto.Changeset{}}
"""
def create_flow(customer, attrs \\ %{}) do
customer
|> Ecto.build_assoc(:flows)
|> Flow.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a flow.
## Examples
iex> update_flow(flow, %{field: new_value})
{:ok, %Flow{}}
iex> update_flow(flow, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_flow(%Flow{} = flow, attrs) do
flow
|> Flow.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Flow.
## Examples
iex> delete_flow(flow)
{:ok, %Flow{}}
iex> delete_flow(flow)
{:error, %Ecto.Changeset{}}
"""
def delete_flow(%Flow{} = flow) do
Repo.delete(flow)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking flow changes.
## Examples
iex> change_flow(flow)
%Ecto.Changeset{source: %Flow{}}
"""
def change_flow(%Flow{} = flow) do
Flow.changeset(flow, %{})
end
def change_flow(%Flow{} = flow, attrs) do
Flow.changeset(flow, attrs)
end
alias Flowr.Automation.Task
@doc """
Returns the list of flow_tasks.
## Examples
iex> list_flow_tasks()
[%Task{}, ...]
"""
def list_flow_tasks(%Flow{} = flow) do
query =
from t in Ecto.assoc(flow, :tasks),
order_by: [desc: :inserted_at]
query
|> Repo.all()
end
def list_flow_tasks(%Flowr.Accounts.Customer{} = customer) do
query =
from t in Ecto.assoc(customer, :flow_tasks),
preload: [:flow],
order_by: [desc: :inserted_at]
query
|> Repo.all()
end
@doc """
Gets a single task.
Raises `Ecto.NoResultsError` if the Task does not exist.
## Examples
iex> get_task!(123)
%Task{}
iex> get_task!(456)
** (Ecto.NoResultsError)
"""
def get_task!(id), do: Repo.get!(Task, id)
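
  # Runs every action of the task's flow in order, feeding each action's
  # result into the next one. `Enum.reduce_while/3` halts the chain on the
  # first action that returns `{:error, _}`.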
def run_task(%Task{} = task) do
task =
task
|> Repo.preload([:flow])
task.flow.actions
|> Enum.reduce_while(task.input_data, fn action, acc ->
case run_action(action, acc) do
{:ok, result} -> {:cont, result}
{:error, error} -> {:halt, error}
end
end)
end
def run_action(%Action{} = action, params) do
{:ok, args} = JSONTemplate.parse(action.args_template, params)
Flowr.Automation.Runner.run_action(action, args)
end
def finish_task(%Task{} = task, status, result_info) do
task
|> Task.updating_changeset(%{status: status, result_info: result_info})
|> Repo.update()
end
def change_action(%Action{} = action) do
Action.changeset(action, %{})
end
end

# ---- end of lib/flowr/automation/automation.ex ----

defmodule Manic do
@moduledoc """
![Manic is an Elixir client for interfacing with Bitcoin miner APIs.](https://github.com/libitx/manic/raw/master/media/poster.png)
![Hex.pm](https://img.shields.io/hexpm/v/manic?color=informational)
![GitHub](https://img.shields.io/github/license/libitx/manic?color=informational)
![GitHub Workflow Status](https://img.shields.io/github/workflow/status/libitx/manic/Elixir%20CI)
Manic is an Elixir client for interfacing with Bitcoin miner APIs.
Manic is a port of [unwriter's](https://twitter.com/_unwriter)
[Minercraft](https://minercraft.network) library for JavaScript, with some
added Elixir goodies. Like Minercraft, Manic supports the
[beta version of the Merchant API](https://bitcoinsv.io/2020/04/03/miner-id-and-merchant-api-beta-release/),
and its name is a nod to another classic computer game.
## Features
Manic supports the following features:
* Get dynamic fee rates from miners
* Calculate the fee for any transaction
* Push transactions directly to miners
* Get the status of any transaction from miners
* Automatically verifies the JSON Envelope signatures
| Implemented spec | BRFC |
| ---------------- | ---- |
| [Merchant API Specification](https://github.com/bitcoin-sv-specs/brfc-merchantapi) | `ce852c4c2cd1` |
| [Fee Specification](https://github.com/bitcoin-sv-specs/brfc-misc/tree/master/feespec) | `fb567267440a` |
| [JSON Envelope Specification](https://github.com/bitcoin-sv-specs/brfc-misc/tree/master/jsonenvelope) | `298e080a4598` |
## Installation
The package can be installed by adding `manic` to your list of dependencies
in `mix.exs`.
def deps do
[
{:manic, "~> #{ Mix.Project.config[:version] }"}
]
end
## Usage
### 1. Initalize a miner client
Initialize a [`miner`](`t:miner/0`) client with the full URL of the
Merchant API endpoint.
iex> miner = Manic.miner "https://merchantapi.taal.com"
%Manic.Miner{}
  A client can also be initialized using any of the keys from the list of
`known_miners/0`. Additional headers can also be specified if necessary.
iex> miner = Manic.miner :mempool, headers: [{"token", token}]
%Manic.Miner{}
### 2. Get and calculate fees
The [`miner`](`t:miner/0`) client can then be used to query the miner's
up-to-date fee rates.
iex> Manic.Fees.get(miner)
{:ok, %{
expires: ~U[2020-04-20 16:35:03.168Z],
mine: %{data: 0.5, standard: 0.5},
relay: %{data: 0.25, standard: 0.25},
verified: true
}}
The fee for a transaction can be calculated using the given rates. Manic will
accept hex encoded transaction or a `t:BSV.Tx.t/0`.
iex> Manic.Fees.calculate(rates.mine, tx)
{:ok, 346}
### 3. Push and query transactions
Manic can be used to push transactions directly to the miner. Hex encoded
transactions or `t:BSV.Tx.t/0` structs are accepted.
iex> Manic.TX.push(miner, tx)
{:ok, %{
"api_version" => "0.1.0",
"current_highest_block_hash" => "00000000000000000397a5a37c1f9b409b4b58e76fd6bcac06db1a3004cccb38",
"current_highest_block_height" => 631603,
"miner_id" => "03e92d3e5c3f7bd945dfbf48e7a99393b1bfb3f11f380ae30d286e7ff2aec5a270",
"result_description" => "",
"return_result" => "success",
"timestamp" => "2020-04-21T14:04:39.563Z",
"tx_second_mempool_expiry" => 0,
"txid" => "9c8c5cf37f4ad1a82891ff647b13ec968f3ccb44af2d9deaa205b03ab70a81fa",
"verified" => true
}}
Any transaction's status can be queried by its [`txid`](`t:Manic.TX.txid/0`).
iex> Manic.TX.status(miner, "e4763d71925c2ac11a4de0b971164b099dbdb67221f03756fc79708d53b8800e")
{:ok, %{
"api_version" => "0.1.0",
"block_hash" => "000000000000000000983dee680071d63939f4690a8a797c022eddadc88f925e",
"block_height" => 630712,
"confirmations" => 765,
"miner_id" => "03e92d3e5c3f7bd945dfbf48e7a99393b1bfb3f11f380ae30d286e7ff2aec5a270",
"result_description" => "",
"return_result" => "success",
"timestamp" => "2020-04-20T21:45:38.808Z",
"tx_second_mempool_expiry" => 0,
"verified" => true
}}
## Multi miners
In the examples above, each API function is invoked by passing a single
[`miner`](`t:miner/0`) client. Manic also provides a way of interacting with
multiple miner clients concurrently, and yielding the response from any or all
of the miners.
### 1. Initalize a multi-miner client
Initialize a [`multi miner`](`t:multi_miner/0`) client with a list of miner
Merchant API endpoint details. The list can contain either a full URL, a key
from the list of `known_miners/0`, or a tuple pair containing any additional
options.
iex> Manic.multi([
...> "https://merchantapi.taal.com",
...> :matterpool,
...> {:mempool, headers: [{"token", token}]}
...> ])
%Manic.Multi{}
### 2. Push a tx an any miner
By default, multi miner requests will yield until **any** of the miners
  responds. This allows a transaction to be pushed to multiple miners
  concurrently, returning a response as soon as the first one is received.
iex> Manic.multi(miners)
...> |> Manic.TX.push(tx)
{^miner, {:ok, %{
"api_version" => "0.1.0",
"current_highest_block_hash" => "00000000000000000397a5a37c1f9b409b4b58e76fd6bcac06db1a3004cccb38",
"current_highest_block_height" => 631603,
"miner_id" => "03e92d3e5c3f7bd945dfbf48e7a99393b1bfb3f11f380ae30d286e7ff2aec5a270",
"result_description" => "",
"return_result" => "success",
"timestamp" => "2020-04-21T14:04:39.563Z",
"tx_second_mempool_expiry" => 0,
"txid" => "9c8c5cf37f4ad1a82891ff647b13ec968f3ccb44af2d9deaa205b03ab70a81fa",
"verified" => true
}}}
### 3. Query all miners concurrently
Alternatively, a [`multi miner`](`t:multi_miner/0`) client can be initialized
with the option `yield: :all` which awaits **all** miner clients to respond
before returning the list of responses. This allows us to compare fees from
multiple miners concurrently.
iex> Manic.multi(miners, yield: :all)
...> |> Manic.Fees.get
[
{^miner, {:ok, %{
expires: ~U[2020-04-20 16:35:03.168Z],
mine: %{data: 0.5, standard: 0.5},
relay: %{data: 0.25, standard: 0.25},
verified: true
}}},
{^miner, {:ok, %{
expires: ~U[2020-04-20 16:35:03.168Z],
mine: %{data: 0.5, standard: 0.5},
relay: %{data: 0.25, standard: 0.25},
verified: true
}}},
{^miner, {:ok, %{
expires: ~U[2020-04-20 16:35:03.168Z],
mine: %{data: 0.5, standard: 0.5},
relay: %{data: 0.25, standard: 0.25},
verified: true
}}}
]
"""
@typedoc "Bitcoin miner API client"
@type miner :: Manic.Miner.t
@typedoc "Bitcoin multi miner API client"
@type multi_miner :: Manic.Multi.t
@doc """
Returns a map of Manic's known miners.
Where a miner is known, a miner client can be initialized with `miner/2`
passing the [`atom`](`t:atom/0`) key of the known miner as the first argument.
## Example
iex> Manic.known_miners
%{
matterpool: "https://merchantapi.matterpool.io",
mempool: "https://www.ddpurse.com/openapi",
taal: "https://merchantapi.taal.com"
}
"""
@spec known_miners() :: map
def known_miners, do: Manic.Miner.known_miners()
@doc """
Returns a [`miner`](`t:miner/0`) client for the given URL.
The `url` argument can either be a full URL for the miner's Merchant API
endpoint, or an [`atom`](`t:atom/0`) key from the result of `known_miners/0`.
## Options
The accepted options are:
* `:headers` - Pass a list of additional headers in tuple pairs.
## Examples
A [`miner`](`t:miner/0`) client can be instantiated with a full URL.
iex> Manic.miner "https://merchantapi.taal.com"
%Manic.Miner{}
Instantiating a known miner with additional headers.
iex> Manic.miner :mempool, headers: [{"token", auth_token}]
%Manic.Miner{}
"""
@spec miner(String.t | atom, keyword) :: miner
def miner(url, options \\ []),
do: Manic.Miner.new(url, options)
@doc """
Returns a [`multi miner`](`t:multi_miner/0`) client for the given list of
Merchant API endpoints.
Each element of the give list can contain the same credentials given to
`miner/2`.
## Options
The accepted options are:
* `:yield` - Set to `:all` to await and return all responses. Defaults to `:any` which awaits and returns the first response.
  * `:timeout` - Set a timeout for the requests. Defaults to `5000`. Set to `:infinity` to disable the timeout.
## Examples
A [`multi miner`](`t:multi_miner/0`) client can be instantiated with a list
containing either a full URL, a key from the list of `known_miners/0`, or a
tuple pair containing any additional options.
iex> Manic.multi([
...> "https://merchantapi.taal.com",
...> :matterpool,
...> {:mempool, headers: [{"token", token}]}
...> ])
%Manic.Multi{}
"""
@spec multi(list, keyword) :: multi_miner
def multi(urls, options \\ []),
do: Manic.Multi.new(urls, options)
end

# ---- end of lib/manic.ex ----

defmodule AlphaVantage.Forex do
@moduledoc """
A set of functions for fetching forex (FX) rates from [Alpha Vantage](www.alphavantage.co/documentation/#fx).
"""
alias AlphaVantage.Gateway
@doc """
Returns the realtime exchange rate for any pair of digital currency (e.g., Bitcoin) and physical currency (e.g., USD).
Data returned for physical currency (Forex) pairs also include realtime bid and ask prices.
Please reference https://www.alphavantage.co/documentation/#currency-exchange for more detail.
## Parameters
**Required**
- `:from_currency`
The currency you would like to get the exchange rate for. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
- `:to_currency`
The destination currency for the exchange rate. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
_Optional_ (accepted as a keyword list)
- `:datatype`
By default, `datatype: "map"`.
Strings `"map"` and `"json"` are accepted with the following specifications:
- `"map"` returns a map (default);
- `"json"` returns JSON format;
  *Please note that `"csv"` is not yet supported by Alpha Vantage for this function.
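
  ## Example

  A minimal call sketch (live network access and a configured Alpha Vantage
  API key are assumed):

      AlphaVantage.Forex.exchange_rate("USD", "JPY")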
"""
@spec exchange_rate(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def exchange_rate(from_currency, to_currency, opts \\ []) do
params = [
function: "CURRENCY_EXCHANGE_RATE",
from_currency: from_currency,
to_currency: to_currency
]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the intraday time series (timestamp, open, high, low, close) of the FX currency pair specified, updated realtime.
Please reference https://www.alphavantage.co/documentation/#fx-intraday for more detail.
## Parameters
**Required**
- `:from_symbol`
The currency you would like to get the exchange rate for. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
- `:to_symbol`
The destination currency for the exchange rate. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
- `:interval`
Time interval between two consecutive data points in the time series.
The following values are supported and accepted as strings: `"1min"`, `"5min"`, `"15min"`, `"30min"`, `"60min"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
- `:outputsize`
- `"compact"` returns only the latest 100 data points in the intraday time series (default);
- `"full"` returns the full-length intraday time series.
The `"compact"` option is recommended if you would like to reduce the data size of each API call.
"""
@spec intraday(String.t(), String.t(), String.t(), Keyword.t()) :: Gateway.response()
def intraday(from_symbol, to_symbol, interval, opts \\ []) do
params = [
function: "FX_INTRADAY",
from_symbol: from_symbol,
to_symbol: to_symbol,
interval: interval
]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the daily time series (timestamp, open, high, low, close) of the FX currency pair specified, updated realtime.
Please reference https://www.alphavantage.co/documentation/#fx-daily for more detail.
## Parameters
**Required**
  - `:from_symbol`
The currency you would like to get the exchange rate for. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
  - `:to_symbol`
The destination currency for the exchange rate. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
- `:outputsize`
- `"compact"` returns only the latest 100 data points in the intraday time series (default);
- `"full"` returns the full-length intraday time series.
The `"compact"` option is recommended if you would like to reduce the data size of each API call.
"""
@spec daily(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def daily(from_symbol, to_symbol, opts \\ []) do
params = [function: "FX_DAILY", from_symbol: from_symbol, to_symbol: to_symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the weekly time series (timestamp, open, high, low, close) of the FX currency pair specified, updated realtime.
The latest data point is the prices information for the week (or partial week) containing the current trading day, updated realtime.
Please reference https://www.alphavantage.co/documentation/#fx-weekly for more detail.
## Parameters
**Required**
  - `:from_symbol`
The currency you would like to get the exchange rate for. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
  - `:to_symbol`
The destination currency for the exchange rate. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec weekly(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def weekly(from_symbol, to_symbol, opts \\ []) do
params = [function: "FX_WEEKLY", from_symbol: from_symbol, to_symbol: to_symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the monthly time series (timestamp, open, high, low, close) of the FX currency pair specified, updated realtime.
The latest data point is the prices information for the month (or partial month) containing the current trading day, updated realtime.
Please reference https://www.alphavantage.co/documentation/#fx-monthly for more detail.
## Parameters
**Required**
  - `:from_symbol`
The currency you would like to get the exchange rate for. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
  - `:to_symbol`
The destination currency for the exchange rate. It can either be a physical currency or digital/crypto currency.
For example: `"USD"`, `"EUR"`, or `"BTC"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec monthly(String.t(), String.t(), Keyword.t()) :: Gateway.response()
def monthly(from_symbol, to_symbol, opts \\ []) do
params = [function: "FX_MONTHLY", from_symbol: from_symbol, to_symbol: to_symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
end

# ---- end of lib/alpha_vantage/forex.ex ----

defmodule Level10.Games.GameServer do
@moduledoc """
This module contains the logic for the servers that store the state of each
game.
Each server is initialized with an empty Game struct, and then messages sent
to the server will either read from that struct or manipulate it in different
ways.
This module should handle only the most basic logic, while
`Level10.Games.Game` will contain the logic for manipulating the game state.
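
  A server is started with the game's join code, the creating player, and the
  game settings. A minimal sketch (the registry name here is illustrative):

      GameServer.start_link({"ABCD", player, settings},
        name: {:via, Registry, {Level10.Registry, "ABCD"}}
      )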
"""
use GenServer
alias Level10.StateHandoff
alias Level10.Games.{Card, Game, Player}
require Logger
@typedoc "Return values of `start*` functions"
@type on_start :: {:ok, pid} | {:error, {:already_started, pid} | term}
@typep event_type :: atom()
@max_players 6
@spec start_link({Game.join_code(), Player.t(), Settings.t()}, GenServer.options()) :: on_start
def start_link({join_code, player, settings}, options \\ []) do
GenServer.start_link(__MODULE__, {join_code, player, settings}, options)
end
@impl true
def init({join_code, player, settings}) do
Process.flag(:trap_exit, true)
Process.put(:"$initial_call", {Game, :new, 2})
{:ok, {join_code, player, settings}, {:continue, :load_state}}
end
@impl true
def handle_call({:add_to_table, {player_id, table_id, position, cards_to_add}}, _from, game) do
case Game.add_to_table(game, player_id, table_id, position, cards_to_add) do
{:ok, game} ->
broadcast(game.join_code, :hand_counts_updated, Game.hand_counts(game))
broadcast(game.join_code, :table_updated, game.table)
{:reply, :ok, maybe_complete_round(game, player_id)}
error ->
{:reply, error, game}
end
end
def handle_call(:creator, _from, game) do
{:reply, Game.creator(game), game}
end
def handle_call(:current_player, _from, game) do
{:reply, game.current_player, game}
end
def handle_call(:current_round, _from, game) do
{:reply, game.current_round, game}
end
def handle_call(:current_turn_drawn?, _from, game) do
{:reply, game.current_turn_drawn?, game}
end
def handle_call({:delete_player, player_id}, _from, game) do
case Game.delete_player(game, player_id) do
{:ok, game} ->
broadcast(game.join_code, :players_updated, game.players)
{:reply, :ok, game}
error ->
{:reply, error, game}
end
end
def handle_call({:discard, {player_id, card}}, _from, game) do
with ^player_id <- game.current_player.id,
%Game{} = game <- Game.discard(game, card) do
broadcast(game.join_code, :hand_counts_updated, Game.hand_counts(game))
broadcast(game.join_code, :new_discard_top, card)
if Game.round_finished?(game, player_id) do
{:reply, :ok, maybe_complete_round(game, player_id)}
else
broadcast(game.join_code, :skipped_players_updated, game.skipped_players)
broadcast(game.join_code, :new_turn, game.current_player)
{:reply, :ok, game}
end
else
:needs_to_draw -> {:reply, :needs_to_draw, game}
_ -> {:reply, :not_your_turn, game}
end
end
def handle_call({:draw, {player_id, source}}, _from, game) do
case Game.draw_card(game, player_id, source) do
%Game{} = game ->
if source == :discard_pile do
broadcast(game.join_code, :new_discard_top, Game.top_discarded_card(game))
end
broadcast(game.join_code, :hand_counts_updated, Game.hand_counts(game))
[new_card | _] = game.hands[player_id]
{:reply, new_card, game}
error ->
{:reply, error, game}
end
end
def handle_call(:finished?, _from, game) do
{:reply, game.current_stage == :finish, game}
end
def handle_call(:get, _from, game) do
{:reply, game, game}
end
def handle_call(:hand_counts, _from, game) do
{:reply, Game.hand_counts(game), game}
end
def handle_call({:hand, player_id}, _from, game) do
{:reply, game.hands[player_id], game}
end
def handle_call({:join, player}, _from, game) do
with {:ok, updated_game} <- Game.put_player(game, player),
true <- length(updated_game.players) <= @max_players do
broadcast(game.join_code, :players_updated, updated_game.players)
{:reply, :ok, updated_game}
else
:already_started ->
{:reply, :already_started, game}
_ ->
{:reply, :full, game}
end
end
def handle_call(:levels, _from, game) do
{:reply, game.levels, game}
end
def handle_call({:next_player, player_id}, _from, game) do
{:reply, Game.next_player(game, player_id), game}
end
def handle_call({:player_exists?, player_id}, _from, game) do
{:reply, Game.player_exists?(game, player_id), game}
end
def handle_call(:players, _from, game) do
{:reply, game.players, game}
end
def handle_call(:players_ready, _from, game) do
{:reply, game.players_ready, game}
end
def handle_call(:remaining_players, _from, game) do
{:reply, game.remaining_players, game}
end
def handle_call(:round_started?, _from, game) do
{:reply, game.current_stage == :play, game}
end
def handle_call(:round_winner, _from, game) do
{:reply, Game.round_winner(game), game}
end
def handle_call(:scoring, _from, game) do
{:reply, game.scoring, game}
end
def handle_call(:settings, _from, game) do
{:reply, game.settings, game}
end
def handle_call({:skip_player, {player_id, player_to_skip}}, _from, game) do
skip_card = Card.new(:skip)
with ^player_id <- game.current_player.id,
%Game{} = game <- Game.skip_player(game, player_to_skip),
%Game{} = game <- Game.discard(game, skip_card) do
broadcast(game.join_code, :hand_counts_updated, Game.hand_counts(game))
broadcast(game.join_code, :new_discard_top, skip_card)
broadcast(game.join_code, :skipped_players_updated, game.skipped_players)
if Game.round_finished?(game, player_id) do
{:reply, :ok, maybe_complete_round(game, player_id)}
else
broadcast(game.join_code, :new_turn, game.current_player)
{:reply, :ok, game}
end
else
:already_skipped -> {:reply, :already_skipped, game}
:needs_to_draw -> {:reply, :needs_to_draw, game}
_ -> {:reply, :not_your_turn, game}
end
end
def handle_call(:skipped_players, _from, game) do
{:reply, game.skipped_players, game}
end
def handle_call(:started?, _from, game) do
{:reply, game.current_stage != :lobby, game}
end
def handle_call(:table, _from, game) do
{:reply, game.table, game}
end
def handle_call({:table_cards, {player_id, player_table}}, _from, game) do
case Game.set_player_table(game, player_id, player_table) do
%Game{} = game ->
broadcast(game.join_code, :hand_counts_updated, Game.hand_counts(game))
broadcast(game.join_code, :table_updated, game.table)
{:reply, :ok, maybe_complete_round(game, player_id)}
error ->
{:reply, error, game}
end
end
def handle_call(:top_discarded_card, _from, game) do
card = Game.top_discarded_card(game)
{:reply, card, game}
end
@impl true
def handle_cast({:player_ready, player_id}, game) do
with {:all_ready, game} <- Game.mark_player_ready(game, player_id),
{:ok, game} <- Game.start_round(game) do
broadcast(game.join_code, :round_started, nil)
{:noreply, game}
else
:game_over ->
{:noreply, game}
{:ok, game} ->
broadcast(game.join_code, :players_ready, game.players_ready)
{:noreply, game}
end
end
def handle_cast({:remove_player, player_id}, game) do
game = Game.remove_player(game, player_id)
with status when status != :finish <- game.current_stage,
true <- Game.all_ready?(game),
{:ok, game} <- Game.start_round(game) do
broadcast(game.join_code, :round_started, nil)
{:noreply, game}
else
false ->
broadcast(game.join_code, :player_removed, player_id)
{:noreply, game}
:finish ->
broadcast(game.join_code, :player_removed, player_id)
broadcast(game.join_code, :game_finished, nil)
{:noreply, game}
:game_over ->
{:noreply, game}
end
end
def handle_cast(:start_game, game) do
case Game.start_game(game) do
{:ok, game} ->
Logger.info("Started game #{game.join_code}")
broadcast(game.join_code, :game_started, nil)
{:noreply, game}
:single_player ->
broadcast(game.join_code, :start_error, :single_player)
{:noreply, game}
end
end
def handle_cast({:update, fun}, state) do
{:noreply, apply(fun, [state])}
end
@impl true
def handle_continue(:load_state, {join_code, player, settings}) do
game =
case StateHandoff.pickup(join_code) do
nil ->
Logger.info("Creating new game #{join_code}")
Game.new(join_code, player, settings)
game ->
Logger.info("Creating game from state handoff #{join_code}")
game
end
{:noreply, game}
end
# Handle exits whenever a name conflict occurs
@impl true
def handle_info({:EXIT, _pid, {:name_conflict, _, _, _}}, game), do: {:stop, :shutdown, game}
def handle_info({:EXIT, _pid, :shutdown}, game), do: {:noreply, game}
# Matches whenever we manually stop a server since we don't need to move that
# state to a new node
@impl true
def terminate(:normal, _game), do: :ok
# Called when a SIGTERM is received to begin the handoff process for moving
# game state to other nodes
def terminate(_reason, game = %{join_code: join_code}) do
StateHandoff.handoff(join_code, game)
Process.sleep(10)
:ok
end
# Private Functions
@spec broadcast(Game.join_code(), event_type(), term()) :: :ok | {:error, term()}
defp broadcast(join_code, event_type, event) do
Phoenix.PubSub.broadcast(Level10.PubSub, "game:" <> join_code, {event_type, event})
end
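# Example (hypothetical subscriber): a LiveView or any other process can
# receive the events broadcast above by subscribing to the same topic:
#
#   Phoenix.PubSub.subscribe(Level10.PubSub, "game:" <> join_code)
#
# after which messages arrive as {event_type, event} tuples, such as
# {:new_turn, player} or {:round_finished, player}.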
@spec broadcast_game_complete(Game.t(), Player.id()) :: :ok | {:error, term()}
defp broadcast_game_complete(game, player_id) do
player = Enum.find(game.players, &(&1.id == player_id))
broadcast(game.join_code, :game_finished, player)
end
@spec broadcast_round_complete(Game.t(), Player.id()) :: Game.t()
defp broadcast_round_complete(game, player_id) do
player = Enum.find(game.players, &(&1.id == player_id))
broadcast(game.join_code, :round_finished, player)
end
@spec maybe_complete_round(Game.t(), Player.id()) :: Game.t()
defp maybe_complete_round(game, player_id) do
with true <- Game.round_finished?(game, player_id),
%{current_stage: :finish} = game <- Game.complete_round(game) do
broadcast_game_complete(game, player_id)
game
else
false ->
game
game ->
broadcast_round_complete(game, player_id)
game
end
end
end
# Source: lib/level10/games/game_server.ex
defmodule Astro do
@moduledoc """
Functions for basic astronomical observations such
as sunrise, sunset, solstice, equinox, moonrise,
moonset and moon phase.
"""
alias Astro.{Solar, Lunar, Location, Time, Math, Guards}
import Astro.Math, only: [
sin: 1,
cos: 1,
atan_r: 2,
tan: 1,
mod: 2
]
import Astro.Solar, only: [
obliquity_correction: 1
]
@type longitude :: float()
@type latitude :: float()
@type altitude :: float()
@type degrees :: float()
@type angle() :: number()
@type meters() :: number()
@type phase() :: angle()
@type location :: {longitude, latitude} | Geo.Point.t() | Geo.PointZ.t()
@type date :: Calendar.date() | Calendar.datetime()
@type options :: keyword()
defguard is_lunar_phase(phase) when phase >= 0.0 and phase <= 360.0
@doc """
Returns a `t:Geo.PointZ` containing
the right ascension and declination of
the sun at a given date or date time.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
## Returns
* a `t:Geo.PointZ` struct with coordinates
`{right_ascension, declination, distance}` with properties
`%{reference: :celestial, object: :sun}`.
`distance` is in meters.
## Example
iex> Astro.sun_position_at(~D[1992-10-13])
%Geo.PointZ{
coordinates: {-161.6185428539835, -7.785325031528879, 149169604711.3518},
properties: %{object: :sun, reference: :celestial},
srid: nil
}
"""
@doc since: "0.6.0"
@spec sun_position_at(date()) :: Geo.PointZ.t()
def sun_position_at(unquote(Guards.datetime()) = date_time) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Solar.solar_position()
|> convert_distance_to_m()
|> Location.normalize_location()
|> Map.put(:properties, %{reference: :celestial, object: :sun})
end
def sun_position_at(unquote(Guards.date()) = date) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Solar.solar_position()
|> convert_distance_to_m()
|> Location.normalize_location()
|> Map.put(:properties, %{reference: :celestial, object: :sun})
end
defp convert_distance_to_m({lng, lat, alt}) do
{lng, lat, Math.au_to_m(alt)}
end
@doc """
Returns a `t:Geo.PointZ` containing
the right ascension and declination of
the moon at a given date or date time.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
## Returns
* a `t:Geo.PointZ` struct with coordinates
`{right_ascension, declination, distance}` with properties
`%{reference: :celestial, object: :moon}`
`distance` is in meters.
## Example
iex> Astro.moon_position_at(~D[1992-04-12])
%Geo.PointZ{
coordinates: {134.6978882151538, 13.765242742787006, 5.511320224169038e19},
properties: %{object: :moon, reference: :celestial},
srid: nil
}
"""
@doc since: "0.6.0"
@spec moon_position_at(date()) :: Geo.PointZ.t()
def moon_position_at(unquote(Guards.datetime()) = date_time) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Lunar.lunar_position()
|> convert_distance_to_m()
|> Location.normalize_location()
|> Map.put(:properties, %{reference: :celestial, object: :moon})
end
def moon_position_at(unquote(Guards.date()) = date) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.lunar_position()
|> convert_distance_to_m()
|> Location.normalize_location()
|> Map.put(:properties, %{reference: :celestial, object: :moon})
end
@doc """
Returns the illumination of the moon
as a fraction for a given date or date time.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
## Returns
* a `float` value between `0.0` and `1.0`
representing the fractional illumination of
the moon.
## Example
iex> Astro.illuminated_fraction_of_moon_at(~D[2017-03-16])
0.8884442367681415
iex> Astro.illuminated_fraction_of_moon_at(~D[1992-04-12])
0.6786428237168787
iex> Astro.illuminated_fraction_of_moon_at(~U[2017-03-16 19:55:11.0Z])
0.8334019164562495
"""
@doc since: "0.6.0"
@spec illuminated_fraction_of_moon_at(date()) :: number()
def illuminated_fraction_of_moon_at(unquote(Guards.datetime()) = date_time) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Lunar.illuminated_fraction_of_moon()
end
def illuminated_fraction_of_moon_at(unquote(Guards.date()) = date) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.illuminated_fraction_of_moon()
end
@doc """
Returns the date time of the new
moon before a given date or date time.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
## Returns
* `{:ok, date_time}` at which the new moon occurs or
* `{:error, {module, reason}}`
## Example
iex> Astro.date_time_new_moon_before ~D[2021-08-23]
{:ok, ~U[2021-08-08 13:49:07.000000Z]}
"""
@doc since: "0.5.0"
@spec date_time_new_moon_before(date()) ::
{:ok, Calendar.datetime()} | {:error, {module(), String.t()}}
def date_time_new_moon_before(unquote(Guards.datetime()) = date_time) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Lunar.date_time_new_moon_before()
|> Time.date_time_from_moment()
end
def date_time_new_moon_before(unquote(Guards.date()) = date) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.date_time_new_moon_before()
|> Time.date_time_from_moment()
end
@doc """
Returns the date time of the new
moon at or after a given date or
date time.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
## Returns
* `{:ok, date_time}` at which the new moon occurs or
* `{:error, {module, reason}}`
## Example
iex> Astro.date_time_new_moon_at_or_after ~D[2021-08-23]
{:ok, ~U[2021-09-07 00:50:43.000000Z]}
"""
@doc since: "0.5.0"
@spec date_time_new_moon_at_or_after(date) ::
{:ok, Calendar.datetime()} | {:error, {module(), String.t()}}
def date_time_new_moon_at_or_after(unquote(Guards.datetime()) = datetime) do
_ = calendar
datetime
|> Time.date_time_to_moment()
|> Lunar.date_time_new_moon_at_or_after()
|> Time.date_time_from_moment()
end
def date_time_new_moon_at_or_after(unquote(Guards.date()) = date) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.date_time_new_moon_at_or_after()
|> Time.date_time_from_moment()
end
@doc """
Returns the lunar phase as a
float number of degrees at a given
date or date time.
## Arguments
* `date_time` is a `DateTime`, `Date` or
a `moment` which is a float number of days
since `0000-01-01`
## Returns
* the lunar phase as a float number of
degrees.
## Example
iex> Astro.lunar_phase_at ~U[2021-08-22 12:01:02.170362Z]
180.00001498208536
iex> Astro.lunar_phase_at(~U[2021-07-10 01:18:25.422335Z])
0.021567106773019873
"""
@doc since: "0.5.0"
@spec lunar_phase_at(date()) :: phase()
def lunar_phase_at(unquote(Guards.datetime()) = date_time) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Lunar.lunar_phase_at()
end
def lunar_phase_at(unquote(Guards.date()) = date) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.lunar_phase_at()
end
@doc """
Returns the moon phase as a UTF8 binary
representing an emoji of the moon phase.
## Arguments
* `phase` is a moon phase between `0.0` and `360.0`
## Returns
* A single grapheme string representing the [Unicode
moon phase emoji](https://unicode-table.com/en/sets/moon/)
## Examples
iex> Astro.lunar_phase_emoji 0
"🌑"
iex> Astro.lunar_phase_emoji 45
"🌒"
iex> Astro.lunar_phase_emoji 90
"🌓"
iex> Astro.lunar_phase_emoji 135
"🌔"
iex> Astro.lunar_phase_emoji 180
"🌕"
iex> Astro.lunar_phase_emoji 245
"🌖"
iex> Astro.lunar_phase_emoji 270
"🌗"
iex> Astro.lunar_phase_emoji 320
"🌘"
iex> Astro.lunar_phase_emoji 360
"🌑"
iex> ~U[2021-08-22 12:01:02.170362Z]
...> |> Astro.lunar_phase_at()
...> |> Astro.lunar_phase_emoji()
"🌕"
"""
@emoji_base 0x1f310
@emoji_phase_count 8
@emoji_phase (360.0 / @emoji_phase_count)
@spec lunar_phase_emoji(phase()) :: String.t()
def lunar_phase_emoji(phase) when phase == 360 do
lunar_phase_emoji(0)
end
def lunar_phase_emoji(phase) when is_lunar_phase(phase) do
offset = ceil(phase / @emoji_phase + 0.5)
:unicode.characters_to_binary([offset + @emoji_base])
end
@doc """
Returns the date time of a given
lunar phase at or before a given
date time or date.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
* `phase` is the required lunar phase expressed
as a float number of degrees between `0.0` and
`360.0`
## Returns
* `{:ok, date_time}` at which the phase occurs or
* `{:error, {module, reason}}`
## Example
iex> Astro.date_time_lunar_phase_at_or_before(~D[2021-08-01], Astro.Lunar.new_moon())
{:ok, ~U[2021-07-10 01:15:33.000000Z]}
"""
@doc since: "0.5.0"
@spec date_time_lunar_phase_at_or_before(date(), Astro.phase()) ::
{:ok, Calendar.datetime()} | {:error, {module(), String.t()}}
def date_time_lunar_phase_at_or_before(unquote(Guards.datetime()) = date_time, phase) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Lunar.date_time_lunar_phase_at_or_before(phase)
|> Time.date_time_from_moment()
end
def date_time_lunar_phase_at_or_before(unquote(Guards.date()) = date, phase) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.date_time_lunar_phase_at_or_before(phase)
|> Time.date_time_from_moment()
end
@doc """
Returns the date time of a given
lunar phase at or after a given
date time or date.
## Arguments
* `date_time` is a `DateTime` or a `Date` or
any struct that meets the requirements of
`t:Calendar.date` or `t:Calendar.datetime`
* `phase` is the required lunar phase expressed
as a float number of degrees between `0.0` and
`360.0`
## Returns
* `{:ok, date_time}` at which the phase occurs or
* `{:error, {module, reason}}`
## Example
iex> Astro.date_time_lunar_phase_at_or_after(~D[2021-08-01], Astro.Lunar.full_moon())
{:ok, ~U[2021-08-22 12:01:02.000000Z]}
"""
@doc since: "0.5.0"
@spec date_time_lunar_phase_at_or_after(date(), Astro.phase()) ::
{:ok, Calendar.datetime()} | {:error, {module(), String.t()}}
def date_time_lunar_phase_at_or_after(unquote(Guards.datetime()) = date_time, phase) do
_ = calendar
date_time
|> Time.date_time_to_moment()
|> Lunar.date_time_lunar_phase_at_or_after(phase)
|> Time.date_time_from_moment()
end
def date_time_lunar_phase_at_or_after(unquote(Guards.date()) = date, phase) do
_ = calendar
date
|> Date.to_gregorian_days()
|> Lunar.date_time_lunar_phase_at_or_after(phase)
|> Time.date_time_from_moment()
end
@doc """
Calculates the sunrise for a given location and date.
Sunrise is the moment when the upper limb of
the sun appears on the horizon in the morning.
## Arguments
* `location` is the latitude, longitude and
optionally elevation for the desired sunrise
time. It can be expressed as:
* `{lng, lat}` - a tuple with longitude and latitude
as floating point numbers. **Note** the order of the
arguments.
* a `Geo.Point.t` struct to represent a location without elevation
* a `Geo.PointZ.t` struct to represent a location and elevation
* `date` is a `t:Date`, `t:NaiveDateTime` or `t:DateTime`
to indicate the date of the year in which
the sunrise time is required.
* `options` is a keyword list of options.
## Options
* `solar_elevation` represents the type of sunrise
required. The default is `:geometric` which equates to
a solar elevation of 90°. In this case the calculation
also accounts for refraction and elevation to return a
result which accords with the eye's perception. Other
solar elevations are:
* `:civil` representing a solar elevation of 96.0°. At this
point the sun is just below the horizon so there is
generally enough natural light to carry out most
outdoor activities.
* `:nautical` representing a solar elevation of 102.0°
This is the point at which the horizon is just barely visible
and the moon and stars can still be used for navigation.
* `:astronomical`representing a solar elevation of 108.0°.
This is the point beyond which astronomical observation
becomes impractical.
* Any floating point number representing the desired
solar elevation.
* `:time_zone` is the time zone in which the sunrise
is requested. The default is `:default` in which
the sunrise time is reported in the time zone of
the requested location. Any other time zone name
supported by the option `:time_zone_database` is
acceptable.
* `:time_zone_database` represents the module that
implements the `Calendar.TimeZoneDatabase` behaviour.
The default is `Tzdata.TimeZoneDatabase`.
## Returns
* a `DateTime.t` representing the time of sunrise in the
requested time zone at the requested location or
* `{:error, :time_zone_not_found}` if the requested
time zone is unknown
* `{:error, :no_time}` if for the requested date
and location there is no sunrise. This can occur at
very high latitudes during summer and winter.
## Examples
# Sunrise in Sydney, Australia
Astro.sunrise({151.20666584, -33.8559799094}, ~D[2019-12-04])
{:ok, #DateTime<2019-12-04 05:37:00.000000+11:00 AEDT Australia/Sydney>}
# Sunrise in Alert, Nunavut, Canada
Astro.sunrise({-62.3481, 82.5018}, ~D[2019-12-04])
{:error, :no_time}
"""
@spec sunrise(location, date, options) ::
{:ok, DateTime.t()} | {:error, :time_zone_not_found | :no_time}
def sunrise(location, date, options \\ default_options()) when is_list(options) do
options = Keyword.put(options, :rise_or_set, :rise)
Solar.sun_rise_or_set(location, date, options)
end
@doc """
Calculates the sunset for a given location and date.
Sunset is the moment when the upper limb of
the sun disappears below the horizon in the evening.
## Arguments
* `location` is the latitude, longitude and
optionally elevation for the desired sunset
time. It can be expressed as:
* `{lng, lat}` - a tuple with longitude and latitude
as floating point numbers. **Note** the order of the
arguments.
* a `Geo.Point.t` struct to represent a location without elevation
* a `Geo.PointZ.t` struct to represent a location and elevation
* `date` is a `t:Date`, `t:NaiveDateTime` or `t:DateTime`
to indicate the date of the year in which
the sunset time is required.
* `options` is a keyword list of options.
## Options
* `solar_elevation` represents the type of sunset
required. The default is `:geometric` which equates to
a solar elevation of 90°. In this case the calculation
also accounts for refraction and elevation to return a
result which accords with the eye's perception. Other
solar elevations are:
* `:civil` representing a solar elevation of 96.0°. At this
point the sun is just below the horizon so there is
generally enough natural light to carry out most
outdoor activities.
* `:nautical` representing a solar elevation of 102.0°
This is the point at which the horizon is just barely visible
and the moon and stars can still be used for navigation.
* `:astronomical`representing a solar elevation of 108.0°.
This is the point beyond which astronomical observation
becomes impractical.
* Any floating point number representing the desired
solar elevation.
* `:time_zone` is the time zone in which the sunset
is requested. The default is `:default` in which
the sunset time is reported in the time zone of
the requested location. Any other time zone name
supported by the option `:time_zone_database` is
acceptable.
* `:time_zone_database` represents the module that
implements the `Calendar.TimeZoneDatabase` behaviour.
The default is `Tzdata.TimeZoneDatabase`.
## Returns
* a `t:DateTime` representing the time of sunset in the
requested time zone at the requested location or
* `{:error, :time_zone_not_found}` if the requested
time zone is unknown
* `{:error, :no_time}` if for the requested date
and location there is no sunset. This can occur at
very high latitudes during summer and winter.
## Examples
# Sunset in Sydney, Australia
Astro.sunset({151.20666584, -33.8559799094}, ~D[2019-12-04])
{:ok, #DateTime<2019-12-04 19:53:00.000000+11:00 AEDT Australia/Sydney>}
# Sunset in Alert, Nunavut, Canada
Astro.sunset({-62.3481, 82.5018}, ~D[2019-12-04])
{:error, :no_time}
"""
@spec sunset(location, date, options) ::
{:ok, DateTime.t()} | {:error, :time_zone_not_found | :no_time}
def sunset(location, date, options \\ default_options()) when is_list(options) do
options = Keyword.put(options, :rise_or_set, :set)
Solar.sun_rise_or_set(location, date, options)
end
@doc """
Returns the datetime in UTC for either the
March or September equinox.
## Arguments
* `year` is the gregorian year for which the equinox is
to be calculated
* `event` is either `:march` or `:september` indicating
which of the two annual equinox datetimes is required
## Returns
* `{:ok, datetime}` representing the UTC datetime of
the equinox
## Examples
iex> Astro.equinox 2019, :march
{:ok, ~U[2019-03-20 21:58:06Z]}
iex> Astro.equinox 2019, :september
{:ok, ~U[2019-09-23 07:49:30Z]}
## Notes
This equinox calculation is expected to be accurate
to within 2 minutes for the years 1000 CE to 3000 CE.
An equinox is commonly regarded as the instant of
time when the plane of Earth's equator passes through
the center of the Sun. This occurs twice each year:
around 20 March and 23 September.
In other words, it is the moment at which the
center of the visible Sun is directly above the equator.
"""
@spec equinox(Calendar.year(), :march | :september) :: {:ok, DateTime.t()}
def equinox(year, event) when event in [:march, :september] and year in 1000..3000 do
Solar.equinox_and_solstice(year, event)
end
@doc """
Returns the datetime in UTC for either the
June or December solstice.
## Arguments
* `year` is the gregorian year for which the solstice is
to be calculated
* `event` is either `:june` or `:december` indicating
which of the two annual solstice datetimes is required
## Returns
* `{:ok, datetime}` representing the UTC datetime of
the solstice
## Examples
iex> Astro.solstice 2019, :december
{:ok, ~U[2019-12-22 04:18:57Z]}
iex> Astro.solstice 2019, :june
{:ok, ~U[2019-06-21 15:53:45Z]}
## Notes
This solstice calculation is expected to be accurate
to within 2 minutes for the years 1000 CE to 3000 CE.
A solstice is an event occurring when the Sun appears
to reach its most northerly or southerly excursion
relative to the celestial equator on the celestial
sphere. Two solstices occur annually, around June 21
and December 21.
The seasons of the year are determined by
reference to both the solstices and the equinoxes.
The term solstice can also be used in a broader
sense, as the day when this occurs. The day of a
solstice in either hemisphere has either the most
sunlight of the year (summer solstice) or the least
sunlight of the year (winter solstice) for any place
other than the Equator.
Alternative terms, with no ambiguity as to which
hemisphere is the context, are "June solstice" and
"December solstice", referring to the months in
which they take place every year.
"""
@spec solstice(Calendar.year(), :june | :december) :: {:ok, DateTime.t()}
def solstice(year, event) when event in [:june, :december] and year in 1000..3000 do
Solar.equinox_and_solstice(year, event)
end
@doc """
Returns solar noon for a
given date and location as
a UTC datetime
## Arguments
* `location` is the latitude, longitude and
optionally elevation for the desired solar noon
time. It can be expressed as:
* `{lng, lat}` - a tuple with longitude and latitude
as floating point numbers. **Note** the order of the
arguments.
* a `Geo.Point.t` struct to represent a location without elevation
* a `Geo.PointZ.t` struct to represent a location and elevation
* `date` is any date in the Gregorian
calendar (for example, `Calendar.ISO`)
## Returns
* a UTC datetime representing solar noon
at the given location for the given date
## Example
iex> Astro.solar_noon {151.20666584, -33.8559799094}, ~D[2019-12-06]
{:ok, ~U[2019-12-06 01:45:42Z]}
## Notes
Solar noon is the moment when the Sun passes a
location's meridian and reaches its highest position
in the sky. In most cases, it doesn't happen at 12 o'clock.
At solar noon, the Sun reaches its
highest position in the sky as it passes the
local meridian.
"""
@spec solar_noon(Astro.location(), Calendar.date()) :: {:ok, DateTime.t()}
def solar_noon(location, date) do
%Geo.PointZ{coordinates: {longitude, _, _}} = Location.normalize_location(location)
julian_day = Time.julian_day_from_date(date)
julian_centuries = Time.julian_centuries_from_julian_day(julian_day)
julian_centuries
|> Solar.solar_noon_utc(-longitude)
|> Time.datetime_from_date_and_minutes(date)
end
@doc """
Returns solar longitude for a
given date. Solar longitude is used
to identify the seasons.
## Arguments
* `date` is any date in the Gregorian
calendar (for example, `Calendar.ISO`)
## Returns
* a `float` number of degrees between 0 and
360 representing the solar longitude
on `date`
## Examples
iex> Astro.sun_apparent_longitude ~D[2019-03-21]
0.08035853207991295
iex> Astro.sun_apparent_longitude ~D[2019-06-22]
90.32130455695378
iex> Astro.sun_apparent_longitude ~D[2019-09-23]
179.68691978440197
iex> Astro.sun_apparent_longitude ~D[2019-12-23]
270.83941087483504
## Notes
Solar longitude (the ecliptic longitude of the sun)
in effect describes the position of the earth in its
orbit, being zero at the moment of the vernal
equinox.
Since it is based on how far the earth has moved
in its orbit since the equinox, it is a measure of
what time of the tropical year (the year of seasons)
we are in, but without the inaccuracies of a calendar
date, which is perturbed by leap years and calendar
imperfections.
"""
@spec sun_apparent_longitude(Calendar.date()) :: degrees()
def sun_apparent_longitude(date) do
date
|> Time.julian_day_from_date()
|> Time.julian_centuries_from_julian_day()
|> Solar.sun_apparent_longitude()
end
@doc """
Returns the number of hours of daylight for a given
location on a given date.
## Arguments
* `location` is the latitude, longitude and
optionally elevation for the desired hours of
daylight. It can be expressed as:
* `{lng, lat}` - a tuple with longitude and latitude
as floating point numbers. **Note** the order of the
arguments.
* a `Geo.Point.t` struct to represent a location without elevation
* a `Geo.PointZ.t` struct to represent a location and elevation
* `date` is any date in the Gregorian
calendar (for example, `Calendar.ISO`)
## Returns
* `{:ok, time}` where `time` is a `Time.t()`
## Examples
iex> Astro.hours_of_daylight {151.20666584, -33.8559799094}, ~D[2019-12-07]
{:ok, ~T[14:18:45]}
# No sunset in summer
iex> Astro.hours_of_daylight {-62.3481, 82.5018}, ~D[2019-06-07]
{:ok, ~T[23:59:59]}
# No sunrise in winter
iex> Astro.hours_of_daylight {-62.3481, 82.5018}, ~D[2019-12-07]
{:ok, ~T[00:00:00]}
## Notes
In latitudes above the polar circles (approximately
+/- 66.5631 degrees) there will be no hours of daylight
in winter and 24 hours of daylight in summer.
"""
@spec hours_of_daylight(Astro.location(), Calendar.date()) :: {:ok, Elixir.Time.t()}
def hours_of_daylight(location, date) do
with {:ok, sunrise} <- sunrise(location, date),
{:ok, sunset} <- sunset(location, date) do
seconds_of_sunlight = DateTime.diff(sunset, sunrise)
{hours, minutes, seconds} = Time.seconds_to_hms(seconds_of_sunlight)
Elixir.Time.new(hours, minutes, seconds)
else
{:error, :no_time} ->
if no_daylight_hours?(location, date) do
Elixir.Time.new(0, 0, 0)
else
Elixir.Time.new(23, 59, 59)
end
end
end
@polar_circle_latitude 66.5631
defp no_daylight_hours?(location, date) do
%Geo.PointZ{coordinates: {_longitude, latitude, _elevation}} =
Location.normalize_location(location)
cond do
latitude >= @polar_circle_latitude and (date.month in 10..12 or date.month in 1..3) -> true
latitude <= -@polar_circle_latitude and date.month in 4..9 -> true
true -> false
end
end
@doc """
Returns the declination, in degrees, of a body whose ecliptic
latitude `beta` and longitude `lambda` are given in degrees.
"""
@spec declination(Time.moment(), Astro.angle(), Astro.angle()) :: Astro.angle()
def declination(t, beta, lambda) do
julian_centuries = Time.julian_centuries_from_moment(t)
epsilon = obliquity_correction(julian_centuries)
:math.asin(sin(beta) * cos(epsilon) + cos(beta) * sin(epsilon) * sin(lambda))
|> Math.to_degrees
|> mod(360.0)
end
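# For a body on the ecliptic (beta = 0.0), declination/3 reduces to
# asin(sin(epsilon) * sin(lambda)) expressed in degrees, which is the
# familiar formula for the declination of the Sun.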
@doc """
Returns the right ascension, in degrees, of a body whose ecliptic
latitude `beta` and longitude `lambda` are given in degrees.
"""
@spec right_ascension(Time.moment(), Astro.angle(), Astro.angle()) :: Astro.angle()
def right_ascension(t, beta, lambda) do
julian_centuries = Time.julian_centuries_from_moment(t)
epsilon = obliquity_correction(julian_centuries)
# omega = (125.04 - (1_934.136 * julian_centuries))
# adjusted_epsilon = (epsilon + 0.00256 * cos(omega))
atan_r(sin(lambda) * cos(epsilon) - tan(beta) * sin(epsilon), cos(lambda))
|> Math.to_degrees()
end
@doc false
def default_options do
[
solar_elevation: Solar.solar_elevation(:geometric),
time_zone: :default,
time_zone_database: Tzdata.TimeZoneDatabase
]
end
end
# Source: lib/astro.ex
defmodule Kino.SmartCell do
@moduledoc ~S'''
An interface for defining custom smart cells.
A smart cell is a UI wizard designed for producing a piece of code
that accomplishes a specific task. In other words, a smart cell is
like a code template parameterized through UI interactions.
This module builds on top of `Kino.JS.Live`, consequently keeping
all of its component and communication mechanics. The additional
callbacks specify how the UI maps to source code.
## Usage
Defining a custom cell is similar to writing a regular `Kino.JS.Live`
component, with a couple specifics.
First, we only need to define callbacks, so there is no need for
using `Kino.JS.Live.new/2`. The `c:Kino.JS.Live.init/2` callback
always receives `t:attrs/0` as the first argument.
Second, we add a few new bits, namely `use Kino.SmartCell` and the
two corresponding callback definitions.
Here is an outline of a custom module
defmodule Kino.SmartCell.Custom do
use Kino.JS
use Kino.JS.Live
use Kino.SmartCell, name: "Our custom wizard"
@impl true
def init(attrs, ctx) do
...
end
# Other Kino.JS.Live callbacks
...
@impl true
def to_attrs(ctx) do
...
end
@impl true
def to_source(attrs) do
...
end
end
Additionally, in order for Livebook to pick up the custom cell, we
need to register our module. This usually happens in `application.ex`
Kino.SmartCell.register(Kino.SmartCell.Custom)
## Example
As a minimal example, here is how we can define a cell that allows
editing the underlying code directly through a textarea.
defmodule Kino.SmartCell.Plain do
use Kino.JS
use Kino.JS.Live
use Kino.SmartCell, name: "Plain code editor"
@impl true
def init(attrs, ctx) do
source = attrs["source"] || ""
{:ok, assign(ctx, source: source)}
end
@impl true
def handle_connect(ctx) do
{:ok, %{source: ctx.assigns.source}, ctx}
end
@impl true
def handle_event("update", %{"source" => source}, ctx) do
broadcast_event(ctx, "update", %{"source" => source})
{:noreply, assign(ctx, source: source)}
end
@impl true
def to_attrs(ctx) do
%{"source" => ctx.assigns.source}
end
@impl true
def to_source(attrs) do
attrs["source"]
end
asset "main.js" do
"""
export function init(ctx, payload) {
ctx.importCSS("main.css");
ctx.root.innerHTML = `
<textarea id="source"></textarea>
`;
const textarea = ctx.root.querySelector("#source");
textarea.value = payload.source;
textarea.addEventListener("blur", (event) => {
ctx.pushEvent("update", { source: event.target.value });
});
ctx.handleEvent("update", ({ source }) => {
textarea.value = source;
});
}
"""
end
asset "main.css" do
"""
#source {
box-sizing: border-box;
width: 100%;
min-height: 100px;
}
"""
end
end
And then we would register it as
Kino.SmartCell.register(Kino.SmartCell.Plain)
'''
require Logger
import Kino.Utils, only: [has_function?: 3]
alias Kino.JS.Live.Context
@typedoc """
Attributes are an intermediate form of smart cell state, used to
persist and restore cells.
Attributes are computed using `c:to_attrs/1` and used to generate
the source code using `c:to_source/1`.
Note that attributes are serialized and deserialized as JSON for
persistence, hence make sure to use JSON-friendly data structures.
Persisted attributes are passed to `c:Kino.JS.Live.init/2` as the
first argument and should be used to restore the relevant state.
"""
@type attrs :: map()
@doc """
Invoked to compute the smart cell state as serializable attributes.
"""
@callback to_attrs(ctx :: Context.t()) :: attrs()
@doc """
Invoked to generate source code based on the given attributes.
"""
@callback to_source(attrs()) :: String.t()
@doc """
Invoked whenever the base evaluation context changes.
This callback receives the binding and environment available to the
smart cell code.
Note that this callback runs asynchronously and it receives the PID
of the smart cell server, so the result needs to be sent explicitly
and handled using `c:Kino.JS.Live.handle_info/2`.
**Important:** remember that data sent between processes is copied,
so avoid sending large data structures. In particular, when looking
at variables, instead of sending their values, extract and send
only the relevant metadata.
**Important:** avoid any heavy work in this callback, as it runs in
the same process that evaluates code, so we don't want to block it.
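
## Example

A minimal sketch; the message shape sent to the server is an assumption
and must match a `c:Kino.JS.Live.handle_info/2` clause in your module:

    @impl true
    def scan_binding(server, binding, _env) do
      # send only lightweight metadata, such as the bound variable names
      send(server, {:scan_binding_result, Keyword.keys(binding)})
    end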
"""
@callback scan_binding(server :: pid(), Code.binding(), Macro.Env.t()) :: any()
@doc """
Invoked when the smart cell code is evaluated.
This callback receives the result of an evaluation, either the
return value or an exception if raised.
This callback runs asynchronously and has the same characteristics
as `c:scan_binding/3`.
"""
@callback scan_eval_result(server :: pid(), eval_result()) :: any()
@type eval_result ::
{:ok, result :: any()}
| {:error, Exception.kind(), error :: any(), Exception.stacktrace()}
@optional_callbacks scan_binding: 3, scan_eval_result: 2
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts] do
@behaviour Kino.SmartCell
@smart_opts opts
@before_compile Kino.SmartCell
end
end
defmacro __before_compile__(env) do
opts = Module.get_attribute(env.module, :smart_opts)
name = Keyword.fetch!(opts, :name)
quote do
def child_spec(%{ref: ref, attrs: attrs, target_pid: target_pid}) do
%{
id: __MODULE__,
start: {Kino.SmartCell.Server, :start_link, [__MODULE__, ref, attrs, target_pid]},
restart: :temporary
}
end
def __smart_definition__() do
%{
kind: Atom.to_string(__MODULE__),
module: __MODULE__,
name: unquote(name)
}
end
end
end
@doc """
Returns a list of available smart cell definitions.
"""
def definitions() do
for module <- get_modules(), do: module.__smart_definition__()
end
@doc """
Registers a new smart cell.
This should usually be called in `application.ex` when starting
the application.
## Examples
Kino.SmartCell.register(Kino.SmartCell.Custom)
"""
@spec register(module()) :: :ok
def register(module) do
unless has_function?(module, :__smart_definition__, 0) do
raise ArgumentError, "module #{inspect(module)} does not define a smart cell"
end
modules = get_modules()
updated_modules = if module in modules, do: modules, else: modules ++ [module]
put_modules(updated_modules)
end
@registry_key :smart_cell_modules
defp get_modules() do
Application.get_env(:kino, @registry_key, [])
end
defp put_modules(modules) do
Application.put_env(:kino, @registry_key, modules)
end
end
# Source: lib/kino/smart_cell.ex
defmodule AWS.SSO do
@moduledoc """
AWS Single Sign-On Portal is a web service that makes it easy for you to
assign user access to AWS SSO resources such as the user portal. Users can
get AWS account applications and roles assigned to them and get federated
into the application.
For general information about AWS SSO, see [What is AWS Single
Sign-On?](https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
in the *AWS SSO User Guide*.
This API reference guide describes the AWS SSO Portal operations that you
can call programatically and includes detailed information on data types
and errors.
<note> AWS provides SDKs that consist of libraries and sample code for
various programming languages and platforms, such as Java, Ruby, .Net, iOS,
or Android. The SDKs provide a convenient way to create programmatic access
to AWS SSO and other AWS services. For more information about the AWS SDKs,
including how to download and install them, see [Tools for Amazon Web
Services](http://aws.amazon.com/tools/).
</note>
"""
@doc """
Returns the STS short-term credentials for a given role name that is
assigned to the user.
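
## Example

A hypothetical call; the client struct, account id, role name and
access token values are illustrative:

    {:ok, credentials, _response} =
      AWS.SSO.get_role_credentials(client, "123456789012", "ReadOnly", access_token)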
"""
def get_role_credentials(client, account_id, role_name, access_token, options \\ []) do
path_ = "/federation/credentials"
headers = []
headers = if !is_nil(access_token) do
[{"x-amz-sso_bearer_token", access_token} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(role_name) do
[{"role_name", role_name} | query_]
else
query_
end
query_ = if !is_nil(account_id) do
[{"account_id", account_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists all roles that are assigned to the user for a given AWS account.
"""
def list_account_roles(client, account_id, max_results \\ nil, next_token \\ nil, access_token, options \\ []) do
path_ = "/assignment/roles"
headers = []
headers = if !is_nil(access_token) do
[{"x-amz-sso_bearer_token", access_token} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(next_token) do
[{"next_token", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"max_result", max_results} | query_]
else
query_
end
query_ = if !is_nil(account_id) do
[{"account_id", account_id} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists all AWS accounts assigned to the user. These AWS accounts are
assigned by the administrator of the account. For more information, see
[Assign User
Access](https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
in the *AWS SSO User Guide*. This operation returns a paginated response.
"""
def list_accounts(client, max_results \\ nil, next_token \\ nil, access_token, options \\ []) do
path_ = "/assignment/accounts"
headers = []
headers = if !is_nil(access_token) do
[{"x-amz-sso_bearer_token", access_token} | headers]
else
headers
end
query_ = []
query_ = if !is_nil(next_token) do
[{"next_token", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"max_result", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Removes the client- and server-side session that is associated with the
user.
"""
def logout(client, input, options \\ []) do
path_ = "/logout"
{headers, input} =
[
{"accessToken", "x-amz-sso_bearer_token"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, Poison.Parser.t(), Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "awsssoportal"}
host = build_host("portal.sso", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, []) do
url
end
defp add_query(url, query) do
querystring = AWS.Util.encode_query(query)
"#{url}?#{querystring}"
end
defp encode_payload(input) do
if input != nil, do: Poison.Encoder.encode(input, %{}), else: ""
end
end
# Source: lib/aws/sso.ex
defmodule ShopifyAPI.REST do
@moduledoc """
Provides core REST actions for interacting with the Shopify API.
Uses an `AuthToken` for authorization and request rate limiting.
Please don't use this module directly. Instead prefer the higher-level modules
implementing appropriate resource endpoints, such as `ShopifyAPI.REST.Product`
"""
alias ShopifyAPI.AuthToken
alias ShopifyAPI.JSONSerializer
alias ShopifyAPI.REST.Request
@default_pagination Application.compile_env(:shopify_api, :pagination, :auto)
@doc """
Underlying utility retrieval function. The options passed affect both the
return value and, ultimately, the number of requests made to Shopify.
## Options
`:pagination` - Can be `:none`, `:stream`, or `:auto`. Defaults to `:auto`.
`:auto` will block until all the pages have been retrieved and concatenated together.
`:none` will only return the first page. You won't have access to the headers to manually
paginate.
`:stream` will return a `Stream`, prepopulated with the first page.
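
## Examples

A hypothetical call; the `auth` token and `"products.json"` path are
illustrative. With `pagination: :none` only the first page's parsed
body is returned:

    {:ok, body} = ShopifyAPI.REST.get(auth, "products.json", [], pagination: :none)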
"""
@spec get(AuthToken.t(), path :: String.t(), keyword(), keyword()) ::
{:ok, %{required(String.t()) => [map()]}} | Enumerable.t()
def get(%AuthToken{} = auth, path, params \\ [], options \\ []) do
{pagination, opts} = Keyword.pop(options, :pagination, @default_pagination)
case pagination do
:none ->
with {:ok, response} <- Request.perform(auth, :get, path, "", params, opts) do
{:ok, fetch_body(response)}
end
:stream ->
Request.stream(auth, path, params, opts)
:auto ->
auth
|> Request.stream(path, params, opts)
|> collect_results()
end
end
@spec collect_results(Enumerable.t()) ::
{:ok, list()} | {:error, HTTPoison.Response.t() | any()}
defp collect_results(stream) do
stream
|> Enum.reduce_while({:ok, []}, fn
{:error, _} = error, {:ok, _acc} -> {:halt, error}
result, {:ok, acc} -> {:cont, {:ok, [result | acc]}}
end)
|> case do
{:ok, results} -> {:ok, Enum.reverse(results)}
error -> error
end
end
@doc false
def post(%AuthToken{} = auth, path, object \\ %{}, options \\ []) do
with {:ok, body} <- JSONSerializer.encode(object) do
perform_request(auth, :post, path, body, options)
end
end
@doc false
def put(%AuthToken{} = auth, path, object, options \\ []) do
with {:ok, body} <- JSONSerializer.encode(object) do
perform_request(auth, :put, path, body, options)
end
end
@doc false
def delete(%AuthToken{} = auth, path), do: perform_request(auth, :delete, path)
defp perform_request(auth, method, path, body \\ "", options \\ []) do
with {:ok, response} <- Request.perform(auth, method, path, body, [], options),
response_body <- fetch_body(response) do
{:ok, response_body}
end
end
defp fetch_body(http_response) do
Map.fetch!(http_response, :body)
end
end
# Source: lib/shopify_api/rest.ex
defmodule StaffNotesWeb.LayoutView do
@moduledoc """
View functions for the site's main page layout.
"""
use StaffNotesWeb, :view
alias StaffNotes.Accounts.User
@typedoc """
The application name as an atom.
"""
@type app_name :: atom
@typedoc """
The link information as a tuple of the author's name and URL to link to.
"""
@type link_info :: {String.t(), String.t()}
@typedoc """
Represents a type that may be a user or may represent nothing.
"""
@type maybe_user :: User.t() | nil
@doc """
Renders the GitHub-style `<> with ♥ by [author link]` footer item.
This function can read the author link information from the `Application` environment. You can set
the author link information by adding the following to your `config.exs`:
```
config :app_name,
author_name: "<NAME>",
author_url: "https://example.com"
```
Or you can supply the author link information as a `{name, url}` tuple as the first argument.
## Options
Options are used to customize the rendering of the element.
* `:link_options` -- A keyword list passed as attributes to the author link `a` tag
* All other options are applied as attributes to the containing `div` element
## Examples
```
Phoenix.HTML.safe_to_string(LayoutView.code_with_heart(:app_name))
#=> "<svg .../> with <svg .../> by <a href=\"https://example.com\">Author's Name</a>"
```
"""
@spec code_with_heart(app_name | link_info, Keyword.t()) :: Phoenix.HTML.safe()
def code_with_heart(link_info, options \\ [])
def code_with_heart(app_name, options) when is_atom(app_name) do
name = Application.get_env(app_name, :author_name)
location = Application.get_env(app_name, :author_url)
code_with_heart({name, location}, options)
end
def code_with_heart({name, location} = tuple, options) when is_tuple(tuple) do
{link_options, _options} = Keyword.pop(options, :link_options)
link_options = Keyword.merge(link_options || [], to: location)
html_escape([
octicon(:code),
gettext(" with "),
octicon(:heart),
gettext(" by "),
link(name, link_options)
])
end
@doc """
Renders the link to the source repository.
## Options
* `:text` -- If `:text` is true, use `GitHub` as the link text; otherwise use the [GitHub mark
octicon][mark-github] _(defaults to `false`)_
* All other options are passed to `Phoenix.HTML.Link.link/2`
[mark-github]: https://octicons.github.com/icon/mark-github/
"""
@spec github_link(atom | String.t(), Keyword.t()) :: Phoenix.HTML.safe()
def github_link(url, options \\ [])
def github_link(app_name, options) when is_atom(app_name) do
repo_url = Application.get_env(app_name, :github_url)
github_link(repo_url, options)
end
def github_link(repo_url, options) when is_binary(repo_url) and is_list(options) do
{text, options} = Keyword.pop(options, :text)
options = Keyword.merge(options, to: repo_url)
link(github_link_text(text), options)
end
defp github_link_text(true), do: "GitHub"
defp github_link_text(_), do: octicon("mark-github")
@doc """
Renders the appropriate login buttons depending on whether the user is signed in.
When `current_user` is `nil`, the login button is displayed. When `current_user` is defined, a
logout link and link to the user's profile page is displayed.
"""
@spec login_button(Plug.Conn.t(), maybe_user) :: Phoenix.HTML.safe()
def login_button(conn, current_user)
def login_button(conn, nil) do
link to: auth_path(conn, :index, from: conn.request_path), class: "btn" do
[
gettext("Sign in with"),
octicon("mark-github")
]
end
end
def login_button(conn, %User{} = current_user) do
[
link to: auth_path(conn, :delete) do
[
octicon("sign-out"),
" ",
gettext("Sign Out")
]
end,
link to: user_path(conn, :show, current_user) do
[
to_string(current_user.name),
avatar(current_user, size: 36)
]
end
]
end
@doc """
Renders the flash content.
## Examples
```
render_flash(@conn)
```
"""
@spec render_flash(Plug.Conn.t() | Map.t()) :: Phoenix.HTML.safe()
def render_flash(flash_info)
def render_flash(%Plug.Conn{} = conn), do: render_flash(get_flash(conn))
def render_flash(flash), do: render_flash([], flash)
defp render_flash(content, %{error: message} = flash) do
{:safe, error_flash} = content_tag(:p, message, role: "alert", class: "flash flash-error")
render_flash([error_flash | content], Map.drop(flash, [:error]))
end
defp render_flash(content, %{info: message} = flash) do
{:safe, info_flash} = content_tag(:p, message, role: "alert", class: "flash")
render_flash([info_flash | content], Map.drop(flash, [:info]))
end
defp render_flash(content, _), do: {:safe, content}
end
# Source: lib/staff_notes_web/views/layout_view.ex
defmodule Sanbase.Utils.ListSelector.Transform do
import Sanbase.DateTimeUtils
def args_to_filters_combinator(args) do
(get_in(args, [:selector, :filters_combinator]) || "and")
|> to_string()
|> String.downcase()
end
def args_to_base_projects(args) do
case get_in(args, [:selector, :base_projects]) do
nil -> :all
data -> data
end
end
def args_to_filters(args) do
(get_in(args, [:selector, :filters]) || [])
|> Enum.map(&transform_from_to/1)
|> Enum.map(&update_dynamic_datetimes/1)
|> Enum.map(&atomize_values/1)
end
def args_to_order_by(args) do
get_in(args, [:selector, :order_by])
|> transform_from_to()
|> update_dynamic_datetimes()
|> atomize_values()
end
def args_to_pagination(args) do
get_in(args, [:selector, :pagination])
end
def atomize_values(nil), do: nil
def atomize_values(%{args: args} = map) do
%{map | args: atomize_values(args)}
end
def atomize_values(map) when is_map(map) do
{to_atomize, rest} = Map.split(map, [:operator, :aggregation, :direction])
to_atomize
|> Enum.into(%{}, fn {k, v} ->
v = if is_binary(v), do: String.to_existing_atom(v), else: v
{k, v}
end)
|> Map.merge(rest)
end
def atomize_values(data), do: data
def transform_from_to(%{from: %DateTime{}, to: %DateTime{}} = map), do: map
def transform_from_to(%{from: "utc_now" <> _ = from, to: "utc_now" <> _ = to} = map) do
%{
map
| from: utc_now_string_to_datetime!(from) |> round_datetime(rounding: :up),
to: utc_now_string_to_datetime!(to) |> round_datetime(rounding: :up)
}
end
def transform_from_to(%{from: "utc_now" <> _}),
do: {:error, "Cannot use dynamic 'from' without dynamic 'to'"}
def transform_from_to(%{to: "utc_now" <> _}),
do: {:error, "Cannot use dynamic 'from' without dynamic 'from'"}
def transform_from_to(%{from: from, to: to} = map) when is_binary(from) and is_binary(to) do
%{
map
| from: from_iso8601!(from) |> round_datetime(rounding: :up),
to: from_iso8601!(to) |> round_datetime(rounding: :up)
}
end
def transform_from_to(%{args: %{} = args} = map) do
%{map | args: transform_from_to(args)}
end
def transform_from_to(map), do: map
def update_dynamic_datetimes(nil), do: nil
def update_dynamic_datetimes(%{args: args} = filter) do
case update_dynamic_datetimes(args) do
%{} = updated_args ->
%{filter | args: updated_args}
{:error, error} ->
{:error, error}
end
end
def update_dynamic_datetimes(%{} = map) do
dynamic_from = Map.get(map, :dynamic_from)
dynamic_to = Map.get(map, :dynamic_to)
case {dynamic_from, dynamic_to} do
{nil, nil} ->
map
{nil, _} ->
{:error, "Cannot use 'dynamic_to' without 'dynamic_from'."}
{_, nil} ->
{:error, "Cannot use 'dynamic_from' without 'dynamic_to'."}
_ ->
now = Timex.now()
shift_to_by = if dynamic_to == "now", do: 0, else: str_to_sec(dynamic_to)
from = Timex.shift(now, seconds: -str_to_sec(dynamic_from))
to = Timex.shift(now, seconds: -shift_to_by)
map
|> Map.put(:from, from |> round_datetime(rounding: :up))
|> Map.put(:to, to |> round_datetime(rounding: :up))
end
end
def update_dynamic_datetimes(filter), do: filter
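# Illustrative example: an args map such as
#   %{dynamic_from: "7d", dynamic_to: "now"}
# comes back with a :from key pointing roughly 7 days into the past and a
# :to key holding the (rounded) current time; the :dynamic_* keys are
# left in place alongside them.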
end
# Source: lib/sanbase/utils/list_selector_transform.ex
defmodule WxWinObj.API do
require Logger
@moduledoc """
An API to make using WxWindowObject simpler, and to make sure any errors are
reported in the caller's code.
"""
@doc """
Create a new window and optionally show it.
- **windowSpec**: The name of the module containing the window specification (see WxDsl).
- **windowLogic**: The name of the module containing the event handling code.
- **options**: Options when creating a window. They can be:
* _show:_ Bool, show the window when created (default)
* _name:_ The name that the window will be registered with. If a name is not supplied
or is nil, then the name of the module containing the windowSpec will be used to register the window.
```
start_link(MyWindow, MyWindowEvents, show: true, name: MyWindow)
```
"""
def newWindow(windowSpec, windowLogic, options \\ []) do
Logger.debug("newWindow(#{inspect(windowSpec)}, #{inspect(windowLogic)},#{inspect(options)})")
case WxWinObj.start_link(windowSpec, windowLogic, options) do
{:ok, window} ->
window
{:error, reason} ->
{:error, reason}
end
end
@doc """
Show a window.
the window parameter may be either the name given when the window was created,
or the PID returned by the new window call.
"""
def showWindow(window) do
Logger.debug("showWindow(#{inspect(window)})")
case checkPid(window) do
{:error, reason} -> {:error, reason}
window -> WxWinObj.show(window)
end
end
@doc """
Hide a window.
the window parameter may be either the name given when the window was created,
or the PID returned by the new window call.
"""
def hideWindow(window) do
case checkPid(window) do
{:error, reason} -> {:error, reason}
window -> WxWinObj.hide(window)
end
end
@doc """
Close a window.
the window parameter may be either the name given when the window was created,
or the PID returned by the new window call.
"""
def closeWindow(window) do
Logger.debug("closeWindow(#{inspect(window)})")
case checkPid(window) do
{:error, reason} ->
  {:error, reason}

_window ->
  # WxWinObj.close(window)
  Logger.warn("closeWindow not yet implemented")
end
end
@doc """
Wait for window to close.
All events apart from the termination event from the specified window will be
discarded. If window is nil, then a termination event from any window will cause
a return.
If a timeout is supplied, then waitForWindowClose will return after the
specified number of milliseconds.
Returns:
- {windowName, :window_closed, reason}
- :timeout
"""
def waitForWindowClose(waitWindow, timeout \\ -1) when is_integer(timeout) do
case timeout do
-1 ->
receive do
{windowName, :child_window_closed, reason} ->
Logger.debug(
"Msg received: :child_window_closed window = #{inspect(windowName)}, waitWindow = #{
inspect(waitWindow)
}"
)
cond do
windowName == waitWindow -> {windowName, :window_closed, reason}
true -> waitForWindowClose(waitWindow, timeout)
end
{:EXIT, pid, :normal} ->
Logger.debug("EXIT received: #{inspect({:EXIT, pid, :normal})}")
msg ->
Logger.info("Msg received: #{inspect(msg)}")
waitForWindowClose(waitWindow, timeout)
end
timeout ->
receive do
{windowName, :child_window_closed, reason} ->
Logger.info(
"Msg received: :child_window_closed window = #{inspect(windowName)}, waitWindow = #{
inspect(waitWindow)
}"
)
cond do
windowName == waitWindow -> {windowName, :window_closed, reason}
true -> waitForWindowClose(waitWindow, timeout)
end
_ ->
waitForWindowClose(waitWindow, timeout)
after
timeout ->
:timeout
end
end
end
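# Example (hypothetical window modules):
#
#   win = WxWinObj.API.newWindow(MyWindow, MyWindowEvents, name: MyWindow)
#   WxWinObj.API.showWindow(MyWindow)
#   WxWinObj.API.waitForWindowClose(MyWindow, 60_000)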
# ----------------------------------------------------------------------------
defp checkPid(window) when is_pid(window) or is_atom(window) do
window
end
defp checkPid(_window) do
{:error, "Window identifier must be an atom or a PID"}
end
def close(_window) do
Logger.warn("close/1 not implemented")
end
end
# Source: lib/ElixirWx/WxWindowObjectApi.ex
defmodule Stargate.Receiver.Processor do
@moduledoc """
Defines a `Stargate.Receiver.Processor` module as a GenStage
process under the consumer or reader supervision tree.
The processor stage performs the message handling step for all
messages received on the connection by storing and calling the
application's handler module on each message received.
To better handle complex or long-running operations when handling
messages, the processor stage can be scaled horizontally and takes
care of the necessary subscriptions both upstream and downstream
within the GenStage pipeline.
During initialization, the processor stage stores several
pieces of information in its process dictionary that are
available to the application's message handler module when
handling messages if necessary, including the topic, namespace,
tenant, and persistence of the connection.
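A handler can read these values with `Process.get/1`, for example
`Process.get(:sg_topic)`, from within its message handling code.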
"""
use GenStage
import Stargate.Supervisor, only: [via: 2]
@type raw_message :: String.t()
defmodule State do
@moduledoc """
Defines the struct `Stargate.Receiver.Processor` uses
to store its state.
Records the name of the process registry, the path parameters
(persistence, tenant, namespace, and topic) as well as the
handler module defined by the calling application, any init
args required for a stateful handler and the state of the handler.
"""
defstruct [
:registry,
:topic,
:namespace,
:tenant,
:persistence,
:handler,
:handler_init_args,
:handler_state
]
end
@doc """
Starts a `Stargate.Receiver.Processor` GenStage process and
links it to the calling process.
Passes the configuration from the supervisors to the stage to
initialize its state and setup subscription to the
`Stargate.Receiver.Dispatcher` producer stage.
"""
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(init_args) do
registry = Keyword.fetch!(init_args, :registry)
name = Keyword.fetch!(init_args, :processor_name)
GenStage.start_link(__MODULE__, init_args, name: via(registry, name))
end
@impl GenStage
def init(init_args) do
state = %State{
registry: Keyword.fetch!(init_args, :registry),
topic: Keyword.fetch!(init_args, :topic),
namespace: Keyword.fetch!(init_args, :namespace),
tenant: Keyword.fetch!(init_args, :tenant),
persistence: Keyword.get(init_args, :persistence, "persistent"),
handler: Keyword.fetch!(init_args, :handler),
handler_init_args: Keyword.get(init_args, :handler_init_args, [])
}
Process.put(:sg_topic, state.topic)
Process.put(:sg_namespace, state.namespace)
Process.put(:sg_tenant, state.tenant)
Process.put(:sg_persistence, state.persistence)
dispatcher =
via(
state.registry,
{:dispatcher, "#{state.persistence}", "#{state.tenant}", "#{state.namespace}",
"#{state.topic}"}
)
{:ok, handler_state} = state.handler.init(state.handler_init_args)
{:producer_consumer, %{state | handler_state: handler_state},
subscribe_to: [{dispatcher, []}]}
end
@impl GenStage
def handle_events(messages, _from, state) do
decoded_messages =
decode_messages(messages, state.persistence, state.tenant, state.namespace, state.topic)
{_, new_handler_state, responses} = handle_messages(decoded_messages, state)
message_ids = Enum.map(decoded_messages, fn message -> message.message_id end)
tagged_responses = Enum.zip(Enum.reverse(responses), message_ids)
{:noreply, tagged_responses, %{state | handler_state: new_handler_state}}
end
@impl GenStage
def handle_info(_, state), do: {:noreply, [], state}
defp decode_messages(messages, persistence, tenant, namespace, topic) do
Enum.map(messages, &decode_message(&1, persistence, tenant, namespace, topic))
end
defp decode_message(message, persistence, tenant, namespace, topic) do
message |> Jason.decode!() |> Stargate.Message.new(persistence, tenant, namespace, topic)
end
defp handle_messages(messages, %{handler: handler, handler_state: state}) do
Enum.reduce(messages, {handler, state, []}, &process_handler/2)
end
defp process_handler(message, {handler, state, responses}) do
{response, new_state} = handler.handle_message(message, state)
{handler, new_state, [response | responses]}
end
end
# File: lib/stargate/receiver/processor.ex
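# A minimal sketch of a compatible message handler (the module name and the
# :ack response value are illustrative assumptions). The contract is inferred
# from the processor above: `init/1` returns {:ok, state} and
# `handle_message/2` returns {response, new_state}; each response is paired
# with its message id and emitted downstream.
defmodule MyApp.ExampleHandler do
  def init(init_args), do: {:ok, Keyword.get(init_args, :count, 0)}

  def handle_message(message, count) do
    # Connection metadata stored by the processor is readable if needed.
    topic = Process.get(:sg_topic)
    IO.inspect({topic, message}, label: "handled")
    {:ack, count + 1}
  end
end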
defmodule DarknetToOnnx.Helper do
@moduledoc """
Helper class used for creating tensors
(partially ported from: https://github.com/onnx/onnx/blob/master/onnx/helper.py)
"""
use Agent, restart: :transient
@onnx_opset_version 15
@onnx_ir_version 8
alias DarknetToOnnx.Mapping
alias Onnx.ModelProto, as: Model
alias Onnx.GraphProto, as: Graph
alias Onnx.NodeProto, as: Node
alias Onnx.ValueInfoProto, as: Value
alias Onnx.AttributeProto, as: Attribute
alias Onnx.OperatorSetIdProto, as: Opset
alias Onnx.TypeProto, as: Type
alias Onnx.TensorProto, as: Placeholder
alias Onnx.TypeProto.Tensor, as: TensorTypeProto
alias Onnx.TensorShapeProto, as: Shape
alias Onnx.TensorShapeProto.Dimension, as: Dimension
# Checks whether a variable is enumerable and not a struct
defp is_enum?(var) do
is_list(var) or
(is_map(var) and not Map.has_key?(var, :__struct__)) or
is_tuple(var)
end
defp data_type_id_from_atom(data_type) when is_atom(data_type) do
# Get the data_type number from atom
Enum.find(Placeholder.DataType.constants(), fn {n, t} ->
t == data_type && n
end)
end
@doc """
Construct an OperatorSetIdProto.
Arguments:
domain (string): The domain of the operator set id
version (integer): Version of operator set id
"""
def make_operatorsetid(domain, version) do
%Opset{
domain: domain,
version: version
}
end
defp parse_data_type(data_type) do
parsed_data_type =
cond do
is_atom(data_type) ->
# Check for an existing type identified by the atom
data_type_id_from_atom(data_type)
is_number(data_type) ->
# Check for an existing type identified by the number
Enum.fetch(Placeholder.DataType.constants(), data_type)
true ->
nil
end
case parsed_data_type do
{:ok, dt} ->
dt
{n, a} when is_number(n) and is_atom(a) ->
parsed_data_type
_ ->
max_data_type_id = Enum.count(Placeholder.DataType.constants()) - 1
raise ArgumentError,
"Wrong data_type format. Expected atom or number<#{max_data_type_id}, got: #{data_type}"
end
end
@doc """
Make a TensorProto with specified arguments. If raw is false, this
function will choose the corresponding proto field to store the
values based on data_type. If raw is true, use "raw_data" proto
field to store the values, and values should be of type bytes in
this case.
"""
def make_tensor(name, data_type, dims, vals, raw \\ false) do
{data_type_id, data_type_atom} = parse_data_type(data_type)
if data_type_id == 8 and raw == true,
do: raise(ArgumentError, "Can not use raw_data to store string type")
# itemsize = Mapping.tensor_type_to_nx_size()[data_type_atom]
# expected_size = (raw == false && 1) || itemsize
expected_size = 1
expected_size = Enum.reduce(Tuple.to_list(dims), expected_size, fn val, acc -> acc * val end)
vals = if is_list(vals) and Enum.count(vals) > 1 do
List.flatten(vals)
else
vals
end
if raw == false and Enum.count(vals) != expected_size,
do:
raise(
ArgumentError,
"Number of values does not match tensor's size. Expected #{expected_size}, but it is #{Enum.count(vals)}. "
)
tensor = %Placeholder{
data_type: data_type_id,
name: name,
dims: Tuple.to_list(dims)
}
if raw == true do
%{tensor | raw_data: vals}
else
tvalue =
cond do
# float16/bfloat16 are stored as uint16
data_type_atom == :FLOAT16 or data_type_atom == :BFLOAT16 ->
Nx.tensor(vals, type: {:f, 16})
|> Nx.bitcast({:u, 16})
|> Nx.to_flat_list()
data_type_atom != :COMPLEX64 and data_type_atom != :COMPLEX128 ->
vals
true ->
raise ArgumentError, "Unsupported data type: #{data_type_atom}"
end
Map.replace(
tensor,
Mapping.storage_tensor_type_to_field()[
Mapping.tensor_type_atom_to_storage_type()[data_type_atom]
],
tvalue
)
end
end
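# Example (illustrative; the storage field for :FLOAT is resolved through the
# Mapping module, assumed to be `float_data` per the ONNX spec). Nested lists
# are flattened before the size check against dims:
#
#     make_tensor("weights", :FLOAT, {2, 2}, [[1.0, 2.0], [3.0, 4.0]])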
@doc """
Create a ValueInfoProto structure with internal TypeProto structures
"""
def make_tensor_value_info(name, elem_type, shape, doc_string \\ "", shape_denotation \\ "") do
{elem_type_id, _elem_type_atom} = parse_data_type(elem_type)
the_type = make_tensor_type_proto(elem_type_id, shape, shape_denotation)
%Value{
name: name,
doc_string: (doc_string !== "" && doc_string) || "",
type: the_type
}
end
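# Example (illustrative): a fixed-shape float input. A binary dimension such
# as "batch" would be emitted as a symbolic dim_param instead of a dim_value:
#
#     make_tensor_value_info("data", :FLOAT, {1, 3, 416, 416})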
@doc """
Create a TypeProto structure to be used by make_tensor_value_info
"""
def make_tensor_type_proto(elem_type, shape, shape_denotation \\ []) do
%Type{
value:
{:tensor_type,
%TensorTypeProto{
elem_type: elem_type,
shape:
if shape != nil do
if is_enum?(shape_denotation) == true and Enum.count(shape_denotation) != 0 and
Enum.count(shape_denotation) != tuple_size(shape) do
raise "Invalid shape_denotation. Must be the same length as shape."
end
%Shape{dim: create_dimensions(shape, shape_denotation)}
else
%Shape{}
end
}}
}
end
# Create a TensorShapeProto.Dimension structure based on shape types
defp create_dimensions(shape, shape_denotation) do
list_shape = (is_tuple(shape) && Tuple.to_list(shape)) || shape
list_shape
|> Enum.with_index()
|> Enum.map(fn {acc, index} ->
cond do
is_integer(acc) ->
%Dimension{
value: {:dim_value, acc},
denotation:
if shape_denotation != "" do
Enum.at(shape_denotation, index)
else
""
end
}
is_binary(acc) ->
%Dimension{
value: {:dim_param, acc},
denotation:
if shape_denotation != "" do
Enum.at(shape_denotation, index)
else
""
end
}
# Note: a bare `[]` literal is always truthy in `cond`, so match explicitly.
acc == [] ->
_ = IO.puts("Empty acc")
true ->
raise "Invalid item in shape: #{inspect(acc)}. Needs to be integer or text type."
end
end)
|> List.flatten()
end
@doc """
Creates a GraphProto
"""
def make_graph(
nodes,
name,
inputs,
outputs,
initializer \\ [],
doc_string \\ "",
value_info \\ [],
sparse_initializer \\ []
) do
%Graph{
doc_string: doc_string,
initializer: initializer,
input: inputs,
name: name,
node: nodes,
output: outputs,
quantization_annotation: [],
sparse_initializer: sparse_initializer,
value_info: value_info
}
end
@doc """
Creates a ModelProto
"""
def make_model(graph, kwargs) do
%Model{
doc_string: Keyword.get(kwargs, :doc_string, ""),
domain: Keyword.get(kwargs, :domain, ""),
graph: graph,
ir_version: @onnx_ir_version,
metadata_props: Keyword.get(kwargs, :metadata_props, []),
model_version: Keyword.get(kwargs, :model_version, 1),
opset_import:
Keyword.get(kwargs, :opset_imports, [%Opset{domain: "", version: @onnx_opset_version}]),
producer_name: Keyword.get(kwargs, :producer_name, ""),
producer_version: Keyword.get(kwargs, :producer_version, "0.0.1-sf"),
training_info: Keyword.get(kwargs, :training_info, [])
}
end
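# Sketch of the overall flow (names illustrative). `make_model/2` takes a
# keyword list and falls back to the @onnx_ir_version/@onnx_opset_version
# defaults declared above:
#
#     graph = make_graph(nodes, "yolo", inputs, outputs)
#     model = make_model(graph, producer_name: "darknet_to_onnx")
#     save_model(model, "model.onnx")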
@doc """
Prints a high level representation of a GraphProto
"""
def printable_graph(graph) do
IO.puts("============================================================")
IO.puts(" Graph: " <> graph.name)
IO.puts(" Output nodes: ")
Enum.each(graph.output, fn o ->
dims = for d <- elem(o.type.value, 1).shape.dim, do: elem(d.value, 1)
IO.puts(" " <> o.name <> " " <> inspect(dims))
end)
IO.puts("============================================================")
end
@doc """
Encodes and write a binary file f containing a ModelProto
"""
def save_model(proto, f) do
encoded_model = Onnx.ModelProto.encode!(proto)
{:ok, file} = File.open(f, [:write])
IO.binwrite(file, encoded_model)
File.close(file)
end
@doc """
Helper functions checking whether the passed val is of any of the Onnx types
"""
def is_TensorProto(val) do
is_map(val) and Map.has_key?(val, :__struct__) and
val.__struct__ === Onnx.TensorProto
end
def is_SparseTensorProto(val) do
is_map(val) and Map.has_key?(val, :__struct__) and
val.__struct__ === Onnx.SparseTensorProto
end
def is_GraphProto(val) do
is_map(val) and Map.has_key?(val, :__struct__) and
val.__struct__ === Onnx.GraphProto
end
def is_TypeProto(val) do
is_map(val) and Map.has_key?(val, :__struct__) and
val.__struct__ === Onnx.TypeProto
end
defp create_attribute_map(key, val) do
to_add =
cond do
is_float(val) ->
%{f: val, type: :FLOAT}
is_integer(val) ->
%{i: val, type: :INT}
is_binary(val) or is_boolean(val) ->
%{s: val, type: :STRING}
is_TensorProto(val) ->
%{t: val, type: :TENSOR}
is_SparseTensorProto(val) ->
%{sparse_tensor: val, type: :SPARSE_TENSOR}
is_GraphProto(val) ->
%{g: val, type: :GRAPH}
is_TypeProto(val) ->
%{tp: val, type: :TYPE_PROTO}
is_enum?(val) && Enum.all?(val, fn x -> is_integer(x) end) ->
%{ints: val, type: :INTS}
is_enum?(val) and Enum.all?(val, fn x -> is_float(x) or is_integer(x) end) ->
# Convert all the numbers to float
%{floats: Enum.map(val, fn v -> v / 1 end), type: :FLOATS}
is_enum?(val) and Enum.all?(val, fn x -> is_binary(x) end) ->
%{strings: val, type: :STRINGS}
is_enum?(val) and Enum.all?(val, fn x -> is_TensorProto(x) end) ->
%{tensors: val, type: :TENSORS}
is_enum?(val) and Enum.all?(val, fn x -> is_SparseTensorProto(x) end) ->
%{sparse_tensors: val, type: :SPARSE_TENSORS}
is_enum?(val) and Enum.all?(val, fn x -> is_GraphProto(x) end) ->
%{graphs: val, type: :GRAPHS}
is_enum?(val) and Enum.all?(val, fn x -> is_TypeProto(x) end) ->
%{type_protos: val, type: :TYPE_PROTOS}
end
Map.merge(
%Attribute{
name: Atom.to_string(key)
},
to_add
)
end
@doc """
Creates an attribute based on passed kwargs
"""
def make_attribute(kwargs) do
sortedargs = for {k, v} <- Enum.sort(kwargs), v != "", do: {k, v}
Enum.reduce(sortedargs, [], fn {key, val}, acc ->
[create_attribute_map(key, val) | acc]
end)
end
@doc """
Construct a NodeProto.
Arguments:
op_type (string): The name of the operator to construct
inputs (list of string): list of input names
outputs (list of string): list of output names
name (string, default None): optional unique identifier for NodeProto
doc_string (string, default None): optional documentation string for NodeProto
domain (string, default None): optional domain for NodeProto.
If it's None, we will just use default domain (which is empty)
kwargs (dict): the attributes of the node. The acceptable values
are documented in :func:`make_attribute`.
"""
def make_node(
op_type,
inputs,
outputs,
name \\ "",
kwargs \\ [],
doc_string \\ "",
domain \\ ""
) do
%Node{
op_type: op_type,
input: inputs,
output: outputs,
name: name,
domain: domain,
doc_string: doc_string,
attribute: make_attribute(kwargs)
}
end
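# Example (illustrative): a Conv node; the integer list is encoded by
# make_attribute/1 as an :INTS attribute:
#
#     make_node("Conv", ["data", "conv1_weights"], ["conv1"], "conv1", kernel_shape: [3, 3])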
def remove_initializer_from_input(%Model{graph: graph} = model) do
%{
model
| graph: %Graph{
graph
| input:
Enum.reject(model.graph.input, fn input ->
Enum.find(model.graph.initializer, fn init -> input.name === init.name end)
end)
}
}
end
end
# File: lib/darknet_to_onnx/helper.ex
defmodule CodeCorps.GitHub.Adapters.Comment do
@moduledoc ~S"""
Used to convert between GitHub data which represents a GitHub Issue Comment
and `CodeCorps.Comment` as well as `CodeCorps.GithubComment` attributes.
"""
alias CodeCorps.{
Adapter.MapTransformer,
Comment,
GithubComment,
GitHub.Adapters.Utils.BodyDecorator
}
@github_comment_to_comment_mapping [
{:created_at, [:github_created_at]},
{:markdown, [:body]},
{:modified_at, [:github_updated_at]}
]
@github_payload_to_comment_mapping [
{:created_at, ["created_at"]},
{:markdown, ["body"]},
{:modified_at, ["updated_at"]}
]
@github_payload_to_github_comment_mapping [
{:body, ["body"]},
{:github_created_at, ["created_at"]},
{:github_id, ["id"]},
{:github_updated_at, ["updated_at"]},
{:html_url, ["html_url"]},
{:url, ["url"]}
]
@doc ~S"""
Converts a `CodeCorps.GithubComment` into a set of attributes suitable for
creating or updating a `CodeCorps.Comment`
"""
@spec to_comment(GithubComment.t) :: map
def to_comment(%GithubComment{} = github_comment) do
github_comment
|> Map.from_struct()
|> BodyDecorator.remove_code_corps_header()
|> MapTransformer.transform(@github_comment_to_comment_mapping)
end
@doc ~S"""
Converts a GitHub Issue Comment payload into a set of attributes suitable for
creating or updating a `CodeCorps.GithubComment`
"""
@spec to_github_comment(map) :: map
def to_github_comment(%{} = payload) do
payload |> MapTransformer.transform(@github_payload_to_github_comment_mapping)
end
@autogenerated_github_keys ~w(created_at id updated_at)
@doc ~S"""
Converts a `CodeCorps.Comment` into a set of attributes suitable for creating
or updating an GitHub Issue Comment through the GitHub API.
"""
@spec to_api(Comment.t) :: map
def to_api(%Comment{} = comment) do
comment
|> Map.from_struct
|> MapTransformer.transform_inverse(@github_payload_to_comment_mapping)
|> Map.drop(@autogenerated_github_keys)
|> BodyDecorator.add_code_corps_header(comment)
end
end
# File: lib/code_corps/github/adapters/comment.ex
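# Example (payload abridged; assumes Adapter.MapTransformer copies each listed
# payload path to its target key):
#
#     %{"id" => 1, "body" => "Thanks!", "created_at" => "2017-04-18T14:30:00Z",
#       "updated_at" => "2017-04-18T14:30:00Z", "html_url" => "...", "url" => "..."}
#     |> CodeCorps.GitHub.Adapters.Comment.to_github_comment()
#     #=> %{github_id: 1, body: "Thanks!", github_created_at: "2017-04-18T14:30:00Z",
#     #     github_updated_at: "2017-04-18T14:30:00Z", html_url: "...", url: "..."}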
defmodule Alembic.Translator.Minecraft.Packets do
@moduledoc """
Contains Elixir-ized definitions of each useful client->server packet type,
including the data types to expect when attempting to decode the payload of
a packet of that type.
"""
import Alembic.Translator.Minecraft.Macros
defpacket 0x00, [ keepalive_id: :int ]
defpacket 0x02, [ protocol_version: :byte,
username: :string,
host: :string,
port: :int ]
defpacket 0x03, [ json: :string ]
defpacket 0x07, [ player_eid: :int,
target_eid: :int,
left_click?: :bool ]
defpacket 0x0a, [ on_ground?: :bool ]
defpacket 0x0b, [ x: :double,
y: :double,
stance: :double,
z: :double,
on_ground?: :bool ]
defpacket 0x0c, [ yaw: :float,
pitch: :float,
on_ground?: :bool ]
defpacket 0x0d, [ x: :double,
y: :double,
stance: :double,
z: :double,
yaw: :float,
pitch: :float,
on_ground?: :bool ]
defpacket 0x0e, [ action: :byte,
x: :int,
y: :byte,
z: :int,
face: :byte ]
defpacket 0x0f, [ x: :int,
y: :ubyte,
z: :int,
direction: :byte,
held_item: :slot,
cursor_x: :byte,
cursor_y: :byte,
cursor_z: :byte ]
defpacket 0x10, [ slot_id: :short ]
defpacket 0x12, [ entity_id: :int,
animation: :byte ]
defpacket 0x13, [ entity_id: :int,
action: :byte,
horse_jump_boost: :int ]
defpacket 0x1b, [ sideways: :float,
forwards: :float,
jump?: :bool,
unmount?: :bool ]
defpacket 0x65, [ window_id: :byte ]
defpacket 0x66, [ window_id: :byte,
slot_id: :short,
left_click?: :bool,
action_number: :short,
mode: :byte,
clicked_item: :slot ]
defpacket 0x6a, [ window_id: :byte,
action_number: :short,
accepted?: :bool ]
defpacket 0x6b, [ slot_id: :short,
clicked_item: :slot ]
defpacket 0x6c, [ window_id: :byte,
enchantment: :byte ]
defpacket 0x82, [ x: :int,
y: :short,
z: :int,
line_1: :string,
line_2: :string,
line_3: :string,
line_4: :string ]
defpacket 0xca, [ flags: :byte,
fly_speed: :float,
walk_speed: :float ]
defpacket 0xcb, [ text: :string ]
defpacket 0xcc, [ locale: :string,
view_distance: :byte,
chat_flags: :byte,
difficulty: :byte,
show_capes?: :bool ]
defpacket 0xcd, [ status: :byte ]
defpacket 0xfa, [ channel: :string,
message: :byte_array ]
defpacket 0xfc, [ shared_secret: :byte_array,
verify_token: :byte_array ]
defpacket 0xfe, [ ping: :byte ]
defpacket 0xff, [ reason: :string ]
end | lib/minecraft/packets.ex | 0.670608 | 0.490358 | packets.ex | starcoder |
defmodule AlphaVantage.FundamentalData do
@moduledoc """
A set of functions for fetching various temporal dimensions covering key financial metrics, income statements, balance sheets, cash flow, and other fundamental data points from [Alpha Vantage](www.alphavantage.co/documentation/#fundamentals).
"""
alias AlphaVantage.Gateway
@doc """
Returns the annual and quarterly income statements for the company of interest, with normalized fields mapped to GAAP and IFRS taxonomies of the SEC.
Data is generally refreshed on the same day a company reports its latest earnings and financials.
Please reference https://www.alphavantage.co/documentation/#income-statement for more detail.
## Parameters
**Required**
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec income_statement(String.t(), Keyword.t()) :: Gateway.response()
def income_statement(symbol, opts \\ []) do
params = [function: "INCOME_STATEMENT", symbol: symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
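# Example (symbol illustrative):
#
#     AlphaVantage.FundamentalData.income_statement("IBM", datatype: "json")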
@doc """
Returns the annual and quarterly balance sheets for the company of interest, with normalized fields mapped to GAAP and IFRS taxonomies of the SEC.
Data is generally refreshed on the same day a company reports its latest earnings and financials.
Please reference https://www.alphavantage.co/documentation/#balance-sheet for more detail.
## Parameters
**Required**
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec balance_sheet(String.t(), Keyword.t()) :: Gateway.response()
def balance_sheet(symbol, opts \\ []) do
params = [function: "BALANCE_SHEET", symbol: symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the annual and quarterly cash flow for the company of interest, with normalized fields mapped to GAAP and IFRS taxonomies of the SEC.
Data is generally refreshed on the same day a company reports its latest earnings and financials.
Please reference https://www.alphavantage.co/documentation/#cash-flow for more detail.
## Parameters
**Required**
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec cash_flow(String.t(), Keyword.t()) :: Gateway.response()
def cash_flow(symbol, opts \\ []) do
params = [function: "CASH_FLOW", symbol: symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the annual and quarterly earnings (EPS) for the company of interest.
Quarterly data also includes analyst estimates and surprise metrics.
Please reference https://www.alphavantage.co/documentation/#earnings for more detail.
## Parameters
**Required**
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec earnings(String.t(), Keyword.t()) :: Gateway.response()
def earnings(symbol, opts \\ []) do
params = [function: "EARNINGS", symbol: symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns the company information, financial ratios, and other key metrics for the equity specified.
Data is generally refreshed on the same day a company reports its latest earnings and financials.
Please reference https://www.alphavantage.co/documentation/#company-overview for more detail.
## Parameters
**Required**
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
_Optional_ (accepted as a keyword list)
- `:datatype`
- `"map"` returns a map (default);
- `"json"` returns JSON format;
- `"csv"` returns a CSV (comma separated value) file string.
"""
@spec company_overview(String.t(), Keyword.t()) :: Gateway.response()
def company_overview(symbol, opts \\ []) do
params = [function: "OVERVIEW", symbol: symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns a list of active or delisted US stocks and ETFs, either as of the latest trading day or at a specific time in history.
The endpoint is positioned to facilitate equity research on asset lifecycle and survivorship.
Please reference https://www.alphavantage.co/documentation/#listing-status for more detail.
Note: To ensure optimal API response time, this endpoint uses the CSV format which is more memory-efficient than JSON.
## Parameters
**Required**
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
_Optional_ (accepted as a keyword list)
- `:date`
If no date is set, the API endpoint will return a list of active or delisted symbols as of the latest trading day.
If a date is set, the API endpoint will "travel back" in time and return a list of active or delisted symbols on that particular date in history.
Any YYYY-MM-DD date later than 2010-01-01 is supported. For example, `date: "2013-08-03"`.
- `:state`
By default, `state: "active"` and the API will return a list of actively traded stocks and ETFs.
Set `state: "delisted"` to query a list of delisted assets.
"""
@spec listing_status(String.t(), Keyword.t()) :: Gateway.response()
def listing_status(symbol, opts \\ []) do
params = [function: "LISTING_STATUS", symbol: symbol]
AlphaVantage.query(Keyword.merge(params, opts))
end
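# Example (values illustrative): delisted symbols as of a historical date.
#
#     AlphaVantage.FundamentalData.listing_status("IBM", date: "2014-07-10", state: "delisted")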
@doc """
Returns a list of company earnings expected in the next 3, 6, or 12 months.
Please reference https://www.alphavantage.co/documentation/#earnings-calendar for more detail.
Note: To ensure optimal API response time, this endpoint uses the CSV format which is more memory-efficient than JSON.
## Parameters
_Optional_ (accepted as a keyword list)
- `:symbol`
The name of the security of your choice, provided as a string.
For example: `"MSFT"`
- `:horizon`
By default, `horizon: "3month"` and the API will return a list of expected company earnings in the next 3 months.
You may set `horizon: "6month"` or `horizon: "12month"` to query the earnings scheduled for the next 6 months or 12 months, respectively.
"""
@spec earnings_calendar(Keyword.t()) :: Gateway.response()
def earnings_calendar(opts \\ []) do
params = [function: "EARNINGS_CALENDAR"]
AlphaVantage.query(Keyword.merge(params, opts))
end
@doc """
Returns a list of IPOs expected in the next 3 months.
Please reference https://www.alphavantage.co/documentation/#ipo-calendar for more detail.
Note: To ensure optimal API response time, this endpoint uses the CSV format which is more memory-efficient than JSON.
"""
@spec ipo_calendar(Keyword.t()) :: Gateway.response()
def ipo_calendar(opts \\ []) do
params = [function: "IPO_CALENDAR"]
AlphaVantage.query(Keyword.merge(params, opts))
end
end
# File: lib/alpha_vantage/fundamental_data.ex
defmodule Donut.GraphQL.Identity.Credential do
use Donut.GraphQL.Schema.Notation
@desc "The type of credential"
enum :credential_type do
value :email
end
@desc "The state of a given authentication credential"
mutable_object :credential do
immutable do
field :type, non_null(:credential_type), description: "The type of credential"
field :status, :verification_status, description: "The current verification status of the credential"
field :presentable, :string, description: "The presentable information about the credential"
end
@desc "Remove the credential"
field :remove, type: result(:error) do
resolve fn
%{ type: :email }, _, %{ context: %{ token: token } } ->
case Gobstopper.API.Auth.Email.remove(token) do
:ok -> { :ok, nil }
{ :error, reason } -> { :ok, %Donut.GraphQL.Result.Error{ message: reason } }
end
end
end
end
@desc """
The collection of possible results from a credential request. If successful
returns the `Credential` trying to be accessed, otherwise returns an error.
"""
result :credential, [:credential]
@desc """
The collection of possible results from a credential mutate request. If
successful returns the `MutableCredential` trying to be modified, otherwise
returns an error.
"""
result :mutable_credential, [:mutable_credential]
mutable_object :credential_queries do
immutable do
@desc "The credentials associated with the identity"
field :credentials, list_of(result(mutable(:credential))) do
@desc "The type of credential to retrieve"
arg :type, :credential_type
@desc "The status of the credentials to retrieve"
arg :status, :verification_status
@desc """
Whether to retrieve credentials that have been associated with the
identity, or ones which have not.
"""
arg :associated, :boolean
resolve fn
%{ token: token }, args, _ ->
case Gobstopper.API.Auth.all_credentials(token) do
{ :ok, credentials } -> { :ok, filter_credentials(credentials, args) }
{ :error, reason } -> { :ok, %Donut.GraphQL.Result.Error{ message: reason } }
end
end
end
end
end
defp filter_credentials(credentials, args = %{ associated: associated }) do
filter_credentials(credentials, Map.delete(args, :associated))
|> Enum.filter(fn
%{ status: _, presentable: _ } -> associated
_ -> !associated
end)
end
defp filter_credentials(credentials, %{ type: type, status: status }) do
Enum.find_value(credentials, [], fn
{ ^type, { ^status, presentable } } -> [%{ type: type, status: status, presentable: presentable }]
_ -> false
end)
end
defp filter_credentials(credentials, %{ type: type }) do
Enum.find_value(credentials, [], fn
{ ^type, { :none, nil } } -> [%{ type: type }]
{ ^type, { status, presentable } } -> [%{ type: type, status: status, presentable: presentable }]
_ -> false
end)
end
defp filter_credentials(credentials, %{ status: status }) do
Enum.reduce(credentials, [], fn
{ type, { ^status, presentable } }, acc -> [%{ type: type, status: status, presentable: presentable }|acc]
_, acc -> acc
end)
|> Enum.reverse
end
defp filter_credentials(credentials, _) do
Enum.map(credentials, fn
{ type, { :none, nil } } -> %{ type: type }
{ type, { status, presentable } } -> %{ type: type, status: status, presentable: presentable }
end)
end
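# Illustrative shapes (assumed from the patterns above):
# Gobstopper.API.Auth.all_credentials/1 yields tuples such as
# {:email, {:unverified, "a@b.com"}} or {:email, {:none, nil}}, which these
# clauses normalize into %{type: :email, status: :unverified,
# presentable: "a@b.com"} or %{type: :email} respectively.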
object :credential_mutations do
@desc "Add or replace a credential for an identity"
field :set_credential, type: result(:mutable_credential) do
@desc "The email credential to associate with the identity"
arg :email_credential, :email_credential
resolve fn
%{ token: token }, %{ email_credential: %{ email: email, password: pass } }, _ ->
case Gobstopper.API.Auth.Email.set(token, email, pass) do
:ok ->
case Gobstopper.API.Auth.Email.get(token) do
{ :ok, { status, presentable } } -> { :ok, %{ type: :email, status: status, presentable: presentable } }
{ :error, reason } -> { :ok, %Donut.GraphQL.Result.Error{ message: reason } }
end
{ :error, reason } -> { :ok, %Donut.GraphQL.Result.Error{ message: reason } }
end
_, %{}, _ -> { :error, "Missing credential" }
_, _, _ -> { :error, "Only one credential can be specified" }
end
end
end
end
# File: apps/donut_graphql/lib/donut.graphql/identity/credential.ex
defmodule ExWire.Sync do
@moduledoc """
This is the heart of our syncing logic. Once we've connected to a number
of peers via `ExWire.PeerSupervisor`, we begin to ask for new blocks from
those peers. As we receive blocks, we add them to our
`ExWire.Struct.BlockQueue`.
If the blocks are confirmed by enough peers, then we verify the block and
add it to our block tree.
Note: we do not currently store the block tree, and thus we need to build
it from genesis each time.
"""
use GenServer
require Logger
alias Block.Header
alias Blockchain.{Block, Blocktree, Blocktree.State, Chain}
alias Exth.Time
alias ExWire.Packet
alias ExWire.PeerSupervisor
alias ExWire.Struct.{BlockQueue, Peer, WarpQueue}
alias ExWire.Sync.{WarpProcessor, WarpState}
alias MerklePatriciaTree.{DB, Trie, TrieStorage}
alias ExWire.Packet.Capability.Eth.{
BlockBodies,
BlockHeaders,
GetBlockBodies,
GetBlockHeaders
}
alias ExWire.Packet.Capability.Par.{
GetSnapshotData,
GetSnapshotManifest,
SnapshotData,
SnapshotManifest
}
alias ExWire.Packet.Capability.Par.SnapshotData.{BlockChunk, StateChunk}
@save_block_interval 100
@blocks_per_request 100
@startup_delay 10_000
@retry_delay 5_000
@request_limit 5
@queue_limit 5
@type state :: %{
chain: Chain.t(),
block_queue: BlockQueue.t(),
warp_queue: WarpQueue.t(),
block_tree: Blocktree.t(),
trie: Trie.t(),
last_requested_block: integer() | nil,
starting_block_number: non_neg_integer() | nil,
highest_block_number: non_neg_integer() | nil,
warp: boolean(),
warp_processor: GenServer.server()
}
@spec get_state(GenServer.server()) :: state
def get_state(name \\ __MODULE__) do
GenServer.call(name, :get_state)
end
@doc """
Starts a sync process for a given chain.
"""
@spec start_link({Trie.t(), Chain.t(), boolean(), WarpQueue.t() | nil}, Keyword.t()) ::
GenServer.on_start()
def start_link({trie, chain, warp, warp_queue}, opts \\ []) do
warp_processor = Keyword.get(opts, :warp_processor, WarpProcessor)
GenServer.start_link(__MODULE__, {trie, chain, warp, warp_queue, warp_processor},
name: Keyword.get(opts, :name, __MODULE__)
)
end
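# Example (illustrative): start a plain block sync with no warp state.
#
#     ExWire.Sync.start_link({trie, chain, false, nil})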
@doc """
Once we start a sync server, we'll wait for active peers and
then begin asking for blocks.
TODO: Let's say we haven't connected to any peers before we call
`request_next_block`, then the client effectively stops syncing.
We should handle this case more gracefully.
"""
@impl true
def init({trie, chain, warp, warp_queue, warp_processor}) do
block_tree = load_sync_state(TrieStorage.permanent_db(trie))
block_queue = %BlockQueue{}
{:ok, {block, _caching_trie}} = Blocktree.get_best_block(block_tree, chain, trie)
state = %{
chain: chain,
block_queue: block_queue,
warp_queue: warp_queue,
block_tree: block_tree,
trie: trie,
last_requested_block: nil,
starting_block_number: block.header.number,
highest_block_number: block.header.number,
warp: warp,
warp_processor: warp_processor
}
if warp do
if warp_queue.manifest do
Process.send_after(self(), :resume_warp, @startup_delay)
else
Process.send_after(self(), :request_manifest, @startup_delay)
end
else
request_next_block(@startup_delay)
end
{:ok, state}
end
defp request_next_block(timeout \\ 0) do
Process.send_after(self(), :request_next_block, timeout)
end
@impl true
def handle_cast(
{:processed_block_chunk, chunk_hash, processed_blocks, block},
state = %{warp_queue: warp_queue}
) do
next_state =
warp_queue
|> WarpQueue.processed_block_chunk(chunk_hash, block, processed_blocks)
|> dispatch_new_warp_queue_requests()
|> save_and_check_warp_complete(state)
{:noreply, next_state}
end
def handle_cast(
{:processed_state_chunk, chunk_hash, processed_accounts, state_root},
state = %{warp_queue: warp_queue}
) do
next_state =
warp_queue
|> WarpQueue.processed_state_chunk(chunk_hash, processed_accounts, state_root)
|> dispatch_new_warp_queue_requests()
|> save_and_check_warp_complete(state)
{:noreply, next_state}
end
@doc """
When were receive a block header, we'll add it to our block queue. When we
receive the corresponding block body, we'll add that as well.
"""
@impl true
def handle_info(
:request_next_block,
state = %{block_queue: block_queue, block_tree: block_tree}
) do
new_state = handle_request_next_block(block_queue, block_tree, state)
{:noreply, new_state}
end
def handle_info(:resume_warp, state = %{warp_queue: warp_queue}) do
new_state =
warp_queue
|> dispatch_new_warp_queue_requests()
|> save_and_check_warp_complete(state, false)
{:noreply, new_state}
end
def handle_info(:request_manifest, state) do
new_state = handle_request_manifest(state)
{:noreply, new_state}
end
def handle_info({:request_chunk, chunk_hash}, state) do
new_state = handle_request_chunk(chunk_hash, state)
{:noreply, new_state}
end
def handle_info({:packet, %BlockHeaders{} = block_headers, peer}, state) do
{:noreply, handle_block_headers(block_headers, peer, state)}
end
def handle_info({:packet, %BlockBodies{} = block_bodies, _peer}, state) do
{:noreply, handle_block_bodies(block_bodies, state)}
end
def handle_info({:packet, %SnapshotManifest{} = snapshot_manifest, peer}, state) do
{:noreply, handle_snapshot_manifest(snapshot_manifest, peer, state)}
end
def handle_info(
{:packet, %SnapshotData{} = snapshot_data, peer},
state
) do
{:noreply, handle_snapshot_data(snapshot_data, peer, state)}
end
def handle_info({:packet, packet, peer}, state) do
:ok = Exth.trace(fn -> "[Sync] Ignoring packet #{packet.__struct__} from #{peer}" end)
{:noreply, state}
end
@impl true
def handle_call(:get_state, _from, state) do
{:reply, state, state}
end
@doc """
Dispatches a packet of `GetSnapshotManifest` to all capable peers.
# TODO: That "capable peers" part.
"""
@spec handle_request_manifest(state()) :: state()
def handle_request_manifest(state) do
if send_with_retry(%GetSnapshotManifest{}, :all, :request_manifest) do
:ok = Logger.debug(fn -> "[Sync] Requested snapshot manifests" end)
end
state
end
@doc """
Dispatches a packet of `GetSnapshotData` to a random capable peer.
# TODO: That "capable peer" part.
"""
@spec handle_request_chunk(EVM.hash(), state()) :: state()
def handle_request_chunk(chunk_hash, state) do
if send_with_retry(
%GetSnapshotData{chunk_hash: chunk_hash},
:random,
{:request_chunk, chunk_hash}
) do
:ok = Logger.debug(fn -> "Requested block chunk #{Exth.encode_hex(chunk_hash)}..." end)
end
state
end
@doc """
Dispatches a packet of `GetBlockHeaders` to a peer for the next block
number that we don't have in our block queue or state tree.
"""
@spec handle_request_next_block(BlockQueue.t(), Blocktree.t(), state()) :: state()
def handle_request_next_block(block_queue, block_tree, state) do
next_block_to_request = get_next_block_to_request(block_queue, block_tree)
if send_with_retry(
%GetBlockHeaders{
block_identifier: next_block_to_request,
max_headers: @blocks_per_request,
skip: 0,
reverse: false
},
:random,
:request_next_block
) do
:ok = Logger.debug(fn -> "[Sync] Requested block #{next_block_to_request}" end)
Map.put(state, :last_requested_block, next_block_to_request + @blocks_per_request)
else
state
end
end
@doc """
When we receive a new snapshot manifest, we add it to our warp queue. We may
have new blocks to fetch, so we ask the warp queue for more blocks to
request. We may already, however, be waiting on blocks, in which case we
do nothing.
"""
@spec handle_snapshot_manifest(SnapshotManifest.t(), Peer.t(), state()) :: state()
def handle_snapshot_manifest(%SnapshotManifest{manifest: nil}, _peer, state) do
:ok = Logger.info("Received empty Snapshot Manifest")
state
end
def handle_snapshot_manifest(
%SnapshotManifest{manifest: manifest},
_peer,
state = %{warp_queue: warp_queue}
) do
next_state =
warp_queue
|> WarpQueue.new_manifest(manifest)
|> dispatch_new_warp_queue_requests()
|> save_and_check_warp_complete(state)
next_state
end
@spec dispatch_new_warp_queue_requests(WarpQueue.t(), integer(), integer()) :: WarpQueue.t()
defp dispatch_new_warp_queue_requests(
warp_queue,
request_limit \\ @request_limit,
queue_limit \\ @queue_limit
) do
{new_warp_queue, hashes_to_request} =
WarpQueue.get_hashes_to_request(warp_queue, request_limit, queue_limit)
for hash <- hashes_to_request do
request_chunk(hash)
end
new_warp_queue
end
@doc """
When we receive a SnapshotData, let's try to add the received block to the
warp queue. We may decide to request new blocks at this time.
"""
@spec handle_snapshot_data(SnapshotData.t(), Peer.t(), state()) :: state()
def handle_snapshot_data(%SnapshotData{chunk: nil}, _peer, state) do
:ok = Logger.debug("Received empty SnapshotData message.")
state
end
def handle_snapshot_data(
%SnapshotData{hash: block_chunk_hash, chunk: block_chunk = %BlockChunk{}},
_peer,
state = %{warp_queue: warp_queue, warp_processor: warp_processor}
) do
next_warp_queue =
warp_queue
|> WarpQueue.new_block_chunk(block_chunk_hash)
|> dispatch_new_warp_queue_requests()
WarpProcessor.new_block_chunk(warp_processor, block_chunk_hash, block_chunk)
%{state | warp_queue: next_warp_queue}
end
def handle_snapshot_data(
%SnapshotData{hash: state_chunk_hash, chunk: state_chunk = %StateChunk{}},
_peer,
state = %{warp_queue: warp_queue, warp_processor: warp_processor}
) do
next_warp_queue =
warp_queue
|> WarpQueue.new_state_chunk(state_chunk_hash)
|> dispatch_new_warp_queue_requests()
WarpProcessor.new_state_chunk(warp_processor, state_chunk_hash, state_chunk)
%{state | warp_queue: next_warp_queue}
end
@doc """
When we get block headers from peers, we add them to our current block
queue to incorporate the blocks into our state chain.
Note: some blocks (esp. older ones or on test nets) may be empty, and thus
we won't need to request the bodies. These we process right away.
Otherwise, we request the block bodies for the blocks we don't
know about.
Note: we process blocks in memory and save our state tree every so often.
Note: this mimics a lot of the logic from block bodies since a header
of an empty block *is* a complete block.
"""
@spec handle_block_headers(BlockHeaders.t(), Peer.t(), state()) :: state()
def handle_block_headers(
block_headers,
peer,
state = %{
block_queue: block_queue,
block_tree: block_tree,
chain: chain,
trie: trie,
highest_block_number: highest_block_number
}
) do
if Enum.empty?(block_headers.headers) do
:ok = maybe_request_next_block(block_queue)
state
else
{next_highest_block_number, next_block_queue, next_block_tree, next_trie, header_hashes} =
Enum.reduce(
block_headers.headers,
{highest_block_number, block_queue, block_tree, trie, []},
fn header, {highest_block_number, block_queue, block_tree, trie, header_hashes} ->
header_hash = header |> Header.hash()
{next_block_queue, next_block_tree, next_trie, should_request_block} =
BlockQueue.add_header(
block_queue,
block_tree,
header,
header_hash,
peer.remote_id,
chain,
trie
)
next_header_hashes =
if should_request_block do
:ok = Logger.debug(fn -> "[Sync] Requesting block body #{header.number}" end)
[header_hash | header_hashes]
else
header_hashes
end
next_highest_block_number = Kernel.max(highest_block_number, header.number)
{next_highest_block_number, next_block_queue, next_block_tree, next_trie,
next_header_hashes}
end
)
:ok =
PeerSupervisor.send_packet(
%GetBlockBodies{
hashes: header_hashes
},
:random
)
next_maybe_saved_trie = maybe_save(block_tree, next_block_tree, next_trie)
:ok = maybe_request_next_block(next_block_queue)
state
|> Map.put(:block_queue, next_block_queue)
|> Map.put(:block_tree, next_block_tree)
|> Map.put(:trie, next_maybe_saved_trie)
|> Map.put(:highest_block_number, next_highest_block_number)
end
end
@doc """
After we're given headers from peers, we request the block bodies. Here we
try to add those blocks to our block tree. It's possbile we receive block
`n + 1` before we receive block `n`, so in these cases, we queue up the
blocks until we process the parent block.
Note: we process blocks in memory and save our state tree every so often.
"""
@spec handle_block_bodies(BlockBodies.t(), state()) :: state()
def handle_block_bodies(
block_bodies,
state = %{
block_queue: block_queue,
block_tree: block_tree,
chain: chain,
trie: trie
}
) do
{next_block_queue, next_block_tree, next_trie} =
Enum.reduce(block_bodies.blocks, {block_queue, block_tree, trie}, fn block_body, {block_queue, block_tree, trie} ->
BlockQueue.add_block_struct(block_queue, block_tree, block_body, chain, trie)
end)
next_maybe_saved_trie = maybe_save(block_tree, next_block_tree, next_trie)
:ok = maybe_request_next_block(next_block_queue)
state
|> Map.put(:block_queue, next_block_queue)
|> Map.put(:block_tree, next_block_tree)
|> Map.put(:trie, next_maybe_saved_trie)
end
# Determines the next block we don't yet have in our blocktree and
# dispatches a request to all connected peers for that block and the
# next `n` blocks after it.
@spec get_next_block_to_request(BlockQueue.t(), Blocktree.t()) :: integer()
defp get_next_block_to_request(block_queue, block_tree) do
# This is the best we know about
next_number =
case block_tree.best_block do
nil -> 0
%Block{header: %Header{number: number}} -> number + 1
end
# But we may have it queued up already in the block queue, let's
# start from the first we *don't* know about. It's possible there's
# holes in block queue, so it's not `max(best_block.number, max(keys(queue)))`,
# though it could be...
next_number
|> Stream.iterate(fn n -> n + 1 end)
|> Stream.reject(fn n -> MapSet.member?(block_queue.block_numbers, n) end)
|> Enum.at(0)
end
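# For example (illustrative): with a best block of 5 and blocks 6, 7 and 9
# already queued, the stream starts at 6, rejects 6 and 7, and returns 8.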
@spec maybe_save(Blocktree.t(), Blocktree.t(), Trie.t()) :: Trie.t()
defp maybe_save(block_tree, next_block_tree, trie) do
if block_tree != next_block_tree do
block_number = next_block_tree.best_block.header.number
if rem(block_number, @save_block_interval) == 0 do
save_sync_state(next_block_tree, trie)
else
trie
end
else
trie
end
end
@spec request_chunk(EVM.hash()) :: reference()
defp request_chunk(chunk_hash) do
Process.send_after(self(), {:request_chunk, chunk_hash}, 0)
end
@spec maybe_request_next_block(BlockQueue.t()) :: :ok
defp maybe_request_next_block(block_queue) do
# Let's pull a new block if we have none left
_ =
if block_queue.queue == %{} do
request_next_block()
end
:ok
end
@spec save_and_check_warp_complete(WarpQueue.t(), state(), boolean()) :: state()
defp save_and_check_warp_complete(warp_queue, state = %{trie: trie}, save \\ true) do
if save do
:ok = WarpState.save_warp_queue(TrieStorage.permanent_db(trie), warp_queue)
end
case WarpQueue.status(warp_queue) do
{:pending, reason} ->
Exth.trace(fn ->
"[Sync] Warp incomplete due to #{to_string(reason)}"
end)
%{
state
| warp_queue: warp_queue
}
:success ->
:ok =
Logger.info("[Warp] Warp Completed in #{Time.elapsed(warp_queue.warp_start, :second)}")
# Save our process
saved_trie = save_sync_state(warp_queue.block_tree, trie)
# Request a normal sync to start
request_next_block()
# TODO: Clear the warp cache
# And onward!
%{
state
| warp_queue: warp_queue,
trie: saved_trie,
warp: false
}
end
end
# Loads sync state from our backing database
@spec load_sync_state(DB.db()) :: Blocktree.t()
defp load_sync_state(db) do
State.load_tree(db)
end
# Save sync state from our backing database.
@spec save_sync_state(Blocktree.t(), Trie.t()) :: Trie.t()
defp save_sync_state(blocktree, trie) do
committed_trie = TrieStorage.commit!(trie)
committed_trie
|> TrieStorage.permanent_db()
|> State.save_tree(blocktree)
committed_trie
end
@spec send_with_retry(
Packet.packet(),
PeerSupervisor.node_selector(),
:request_manifest | :request_next_block | {:request_chunk, EVM.hash()}
) :: boolean()
defp send_with_retry(packet, node_selector, retry_message) do
send_packet_result =
PeerSupervisor.send_packet(
packet,
node_selector
)
case send_packet_result do
:ok ->
true
:unsent ->
:ok =
Logger.debug(fn ->
"[Sync] No connected peers to send #{packet.__struct__}, trying again in #{
@retry_delay / 1000
} second(s)"
end)
Process.send_after(self(), retry_message, @retry_delay)
false
end
end
end
# File: apps/ex_wire/lib/ex_wire/sync.ex
defmodule Direction do
def vertical?(direction), do: direction == :up or direction == :down
def horizontal?(direction), do: direction == :right or direction == :left
end
defmodule Vector do
@type t :: %__MODULE__{
direction: :up | :right | :down | :left,
magnitude: integer
}
@enforce_keys [:direction]
defstruct [:direction, magnitude: 0]
end
defmodule Segment do
@type t :: %__MODULE__{
direction: :up | :right | :down | :left,
length: integer,
start: {integer, integer},
distance: integer
}
@enforce_keys [:direction, :length, :start, :distance]
defstruct [:direction, :length, :start, :distance]
def ending(%Segment{direction: d, length: l, start: {x, y}}) do
case d do
:up -> {x, y + l}
:down -> {x, y - l}
:right -> {x + l, y}
:left -> {x - l, y}
end
end
def x_range(%Segment{direction: d, length: l, start: {x, _y}}) do
case d do
:right -> x..(x + l)
:left -> x..(x - l)
_ -> x..x
end
end
def y_range(%Segment{direction: d, length: l, start: {_x, y}}) do
case d do
:up -> y..(y + l)
:down -> y..(y - l)
_ -> y..y
end
end
def vertical?(%Segment{direction: d}), do: vertical?(d)
def vertical?(d), do: d == :up or d == :down
def parallel?(s1, s2), do: vertical?(s1) == vertical?(s2)
def intersection(segment, other) do
!parallel?(segment, other) and
(
vertical = if vertical?(segment), do: segment, else: other
horizontal = if vertical?(segment), do: other, else: segment
%Segment{start: {x1, y1}, distance: p1} = vertical
%Segment{start: {x2, y2}, distance: p2} = horizontal
if x1 in x_range(horizontal) and y2 in y_range(vertical) do
point = {x1, y2}
distance_vertical = p1 + abs(y1 - y2)
distance_horizontal = p2 + abs(x1 - x2)
{point, distance_vertical + distance_horizontal}
else
nil
end
)
end
end
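# Worked example (values illustrative): a vertical segment from {3, 1} going
# :up with length 4 and 10 steps walked, and a horizontal segment from {1, 3}
# going :right with length 4 and 20 steps walked, cross at {3, 3};
# Segment.intersection/2 returns {{3, 3}, (10 + 2) + (20 + 2)} = {{3, 3}, 34}.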
defmodule D3 do
@moduledoc """
--- Day 3: Crossed Wires ---
The gravity assist was successful, and you're well on your way to the Venus refuelling station. During the rush back on Earth, the fuel management system wasn't completely installed, so that's next on the priority list.
Opening the front panel reveals a jumble of wires. Specifically, two wires are connected to a central port and extend outward on a grid. You trace the path each wire takes as it leaves the central port, one wire per line of text (your puzzle input).
The wires twist and turn, but the two wires occasionally cross paths. To fix the circuit, you need to find the intersection point closest to the central port. Because the wires are on a grid, use the Manhattan distance for this measurement. While the wires do technically cross right at the central port where they both start, this point does not count, nor does a wire count as crossing with itself.
What is the Manhattan distance from the central port to the closest intersection?
--- Part Two ---
It turns out that this circuit is very timing-sensitive; you actually need to minimize the signal delay.
To do this, calculate the number of steps each wire takes to reach each intersection; choose the intersection where the sum of both wires' steps is lowest. If a wire visits a position on the grid multiple times, use the steps value from the first time it visits that position when calculating the total value of a specific intersection.
The number of steps a wire takes is the total number of grid squares the wire has entered to get to that location, including the intersection being considered.
What is the fewest combined steps the wires must take to reach an intersection?
"""
@behaviour Day
defp parse(input) do
{stack, current} =
input
|> to_charlist
|> Enum.reduce({[], nil}, fn c, {stack, current} ->
case c do
?U -> {stack, %Vector{direction: :up}}
?R -> {stack, %Vector{direction: :right}}
?D -> {stack, %Vector{direction: :down}}
?L -> {stack, %Vector{direction: :left}}
?, -> {[current | stack], nil}
x when x in ?0..?9 -> {stack, Map.update!(current, :magnitude, &(&1 * 10 + x - ?0))}
_ -> raise ArgumentError, "bad input"
end
end)
# last one doesn't finish getting parsed because the input is trimmed
stack = [current | stack]
Enum.reverse(stack)
end
defp to_segments(instructions) do
[_final_location, lr_segments, ud_segments, _total_distance] =
Enum.reduce(instructions, [{0, 0}, [], [], 0], fn %Vector{
direction: direction,
magnitude: magnitude
},
[
{x, y},
lr_segments,
ud_segments,
distance
] ->
segment = %Segment{
direction: direction,
length: magnitude,
start: {x, y},
distance: distance
}
final_location = Segment.ending(segment)
final_steps = distance + magnitude
{lr_segments, ud_segments} =
case Segment.vertical?(segment) do
true -> {lr_segments, [segment | ud_segments]}
false -> {[segment | lr_segments], ud_segments}
end
[final_location, lr_segments, ud_segments, final_steps]
end)
{lr_segments, ud_segments}
end
def solve(input) do
instruction_sets = Enum.map(input, &parse/1)
[{lr_segments_1, ud_segments_1}, {lr_segments_2, ud_segments_2}] =
Enum.map(instruction_sets, &to_segments/1)
intersections_1 =
Enum.flat_map(lr_segments_1, fn segment ->
ud_segments_2
|> Enum.map(fn other -> Segment.intersection(segment, other) end)
|> Enum.filter(& &1)
end)
intersections_2 =
Enum.flat_map(lr_segments_2, fn segment ->
ud_segments_1
|> Enum.map(fn other -> Segment.intersection(segment, other) end)
|> Enum.filter(& &1)
end)
intersections = intersections_1 ++ intersections_2
part_1 =
intersections
|> Enum.map(fn
{{0, 0}, _steps} -> 999_999
{{x, y}, _steps} -> abs(x) + abs(y)
end)
|> Enum.min()
part_2 =
intersections
|> Enum.map(fn
{_, 0} -> 999_999
{_, steps} -> steps
end)
|> Enum.min()
{
part_1,
part_2
}
end
end
# File: lib/days/03.ex
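# Example from the puzzle statement:
#
#     D3.solve(["R8,U5,L5,D3", "U7,R6,D4,L4"])
#     #=> {6, 30}  (closest Manhattan distance, fewest combined steps)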
defmodule VimCompiler.Parser do
alias VimCompiler.Ast
import VimCompiler.Helpers.LeexHelpers, only: [tokenize: 2]
def parse(str) do
with tokens <- skip_ws(tokenize(str, with: :lexer)) do
parse(tokens, %Ast.Tree{})
end
end
def parse([], result_tree) do
{:ok, Ast.Tree.finalize(result_tree)}
end
def parse([{:kw_def, _, deftype}|tokens], result_tree) do
with {:ok, result_tree1, rest} <- parse_definition(deftype, skip_ws(tokens), result_tree) do
parse(skip_ws(rest), result_tree1)
end
end
def parse(tokens, result) do
with {:ok, expression, rest} <- parse_expression(skip_ws(tokens)) do
parse(skip_ws(rest), [expression|result])
end
end
@doc """
parses
(?< defp?) name DefinitionBody
"""
def parse_definition(deftype, [{:name, _, name}|rest], result_tree) do
with {:ok, param_patterns, rest1} <- parse_param_patterns(skip_ws(rest), []) do
# looking at kw_do or sy_assigns here:
with {:ok, code, rest2} <- parse_definition_body(rest1) do
{:ok, Ast.Tree.add_definition(result_tree, name, deftype == "defp", param_patterns, code), rest2}
end
end
end
def parse_definition(deftype, tokens, _) do
{:error, "Illegal name after #{deftype}", tokens}
end
@doc """
parses
= Expression
or
do
(Assignment)* (or +, not sure yet)
end
"""
def parse_definition_body([{:sy_assigns,_,_}|body]), do: parse_expression(body)
def parse_definition_body([{:kw_do,_,_}|body]), do: parse_multi_definition_body(skip_ws(body), [])
# The terminating clauses must come first, or the catch-all clause below
# would shadow them and `kw_end` would never be matched.
def parse_multi_definition_body([{:kw_end,_,_}|rest], result), do: {:ok, Enum.reverse(result), rest}
def parse_multi_definition_body([], result), do: {:error, "Missing kw end", result}
def parse_multi_definition_body(tokens, result) do
with {:ok, body, rest} <- parse_assignment(tokens), do: parse_multi_definition_body(rest, [body|result])
end
@doc """
(Pattern ("," Pattern)*)?
with follow: { kw_do, sy_assigns }
For now: Pattern == Primary
"""
def parse_param_patterns(tokens = [{:sy_assigns, _, _}|_], result), do: {:ok, Enum.reverse(result), tokens}
def parse_param_patterns(tokens = [{:kw_do, _, _}|_], result), do: {:ok, Enum.reverse(result), tokens}
def parse_param_patterns(tokens, result) do
with {:ok, pattern, rest} <- parse_pattern(tokens), do: parse_param_patterns(skip_ws(rest), [pattern|result])
end
@doc """
Name = Expression | => AssignmentPrime
Expression
"""
def parse_assignment(tokens = [{:name, _, name} | tokens1]) do
case parse_assignment_prime(name, skip_ws(tokens1)) do
{:ok, _, _ } = t -> t
:backtrace -> parse_expression(tokens)
end
end
def parse_assignment(tokens), do: parse_expression(tokens)
defp parse_assignment_prime(name, [{:sy_assigns, _, _} | tokens]) do
with {:ok, expression, rest} <- parse_expression(skip_ws(tokens)) do
{:ok, %Ast.Assignment{name: name, value: expression}, rest}
end
end
defp parse_assignment_prime(_, _), do: :backtrace
@doc """
parses:
Expression ::=
name op6 Term | \
name op9 Factor | => ExpressionWithName
name Params | /
name |
Term;
"""
def parse_expression([]) do
{ :ok, %Ast.EOF{}, [] }
end
def parse_expression([{:name, _, name}]), do: {:ok, %Ast.Name{text: name}, []}
def parse_expression([{:name, _, name}|rest]), do: parse_expression_with_name(name, skip_ws(rest))
def parse_expression(tokens), do: parse_term(skip_ws(tokens))
defp parse_expression_with_name(name, [{:op6, _, op}|tokens]) do
with {:ok, rhs, rest} <- parse_term(skip_ws(tokens)) do
{:ok, %Ast.Term{lhs: %Ast.Name{text: name}, op: String.to_atom(op), rhs: rhs}, rest}
end
end
defp parse_expression_with_name(name, [{:op9, _, op}|tokens]) do
with {:ok, rhs, rest} <- parse_factor(skip_ws(tokens)) do
{:ok, %Ast.Factor{lhs: %Ast.Name{text: name}, op: String.to_atom(op), rhs: rhs}, rest}
end
end
defp parse_expression_with_name(name, tokens) do
with {:ok, params, rest} <- parse_params(skip_ws(tokens), []) do
{:ok, %Ast.Invocation{fn: name, params: params}, rest}
end
end
def parse_params(tokens, params) do
if end_of_params?(tokens) do
{:ok, Enum.reverse(params), tokens}
else
with {:ok, param, rest} <- parse_primary(tokens), do: parse_params(skip_ws(rest), [param | params])
end
end
def parse_term(tokens) do
with r = {:ok, lhs, rest} <- parse_factor(tokens) do
case rest do
[{:op6, _, op}|rhs] -> parse_term_rhs(skip_ws(rhs), %Ast.Term{lhs: lhs, op: String.to_atom(op)})
_ -> r
end
end
end
defp parse_term_rhs(tokens, ast_so_far) do
with {:ok, rhs, rest} <- parse_term(tokens) do
{:ok, %{ast_so_far | rhs: rhs}, rest}
end
end
def parse_factor(tokens) do
with r = {:ok, lhs, rest} <- parse_primary(tokens) do
case rest do
[{:op9, _, op}|rhs] -> parse_term_rhs(skip_ws(rhs), %Ast.Factor{lhs: lhs, op: String.to_atom(op)})
_ -> r
end
end
end
def parse_primary([{:lt_number, _, number} | rest]) do
{:ok, %Ast.Number{value: number}, skip_ws(rest)}
end
def parse_primary([{:name, _, name} | rest]) do
{:ok, %Ast.Name{text: name}, skip_ws(rest)}
end
def parse_primary([{:sy_quote, _, _} | rest]) do
with {:ok, parts, rest1} <- parse_string(rest, []) do
{:ok, %Ast.String{parts: parts}, skip_ws(rest1)}
end
end
def parse_primary([{:sy_lbrack, _, _} | rest]) do
with {:ok, elements, rest1} <- parse_list(skip_ws(rest), []) do
{:ok, %Ast.List{elements: elements}, rest1}
end
end
def parse_primary([{:sy_lparen, _, _} | rest]) do
with {:ok, inner_expression, rest1} <- parse_expression(skip_ws(rest)) do
case rest1 do
[{:sy_rparen, _, _} | rest2] -> {:ok, inner_expression, skip_ws(rest2)}
_ -> {:error, "Missing )", rest1}
end
end
end
# For now:
def parse_pattern(tokens), do: parse_primary(tokens)
def parse_list([{:sy_rbrack,_,_}|rest], result), do: {:ok, Enum.reverse(result), skip_ws(rest)}
def parse_list(tokens, result) do
with {:ok, term, rest} <- parse_term(skip_ws(tokens)) do
case rest do
[{:sy_rbrack,_,_}|rest1] -> {:ok, Enum.reverse([term|result]), skip_ws(rest1)}
[{:sy_comma,_,_}|rest1] -> parse_list(skip_ws(rest1), [term|result])
_ -> {:error, "Missing , or ]", rest}
end
end
end
def parse_string([], result), do: {:ok, compress_result(result, []), []}
def parse_string([{:sy_quote, _, _} | rest], result), do: {:ok, compress_result(result, []), rest}
def parse_string([{:escape, _, _} | rest], result), do: parse_string_prime(rest, result)
def parse_string([{:sy_hashacc, _, _} | rest], result) do
with {:ok, expression, rest1} <- parse_expression(rest) do
case rest1 do
[{:sy_racc, _, _} | rest2] -> parse_string(rest2, [expression|result])
_ -> {:error, "Missing }", rest1}
end
end
end
def parse_string([{_, _, text} | rest], result) do
parse_string(rest, [text|result])
end
defp parse_string_prime([], result), do: {:ok, compress_result(result, []), []}
defp parse_string_prime([{_, _, text} | rest], result), do: parse_string(rest, [text | result])
defp end_of_params?([]), do: true
defp end_of_params?([{:sy_rparen, _, _}|_]), do: true
defp end_of_params?([{:sy_racc, _, _}|_]), do: true
defp end_of_params?([{:op6, _, _}|_]), do: true
defp end_of_params?([{:op9, _, _}|_]), do: true
defp end_of_params?(_), do: false
defp compress_result([], result), do: result
defp compress_result([s|t], result) when is_binary(s), do: compress_result(t, append_str(s, result))
defp compress_result([e|t], result), do: compress_result(t, [e|result])
defp append_str(s, []), do: [s]
defp append_str(s, [s1|tail]) when is_binary(s1), do: [s <> s1|tail]
defp append_str(s, tail), do: [s|tail]
defp skip_ws([{:ws,_,_}|tail]), do: skip_ws(tail)
defp skip_ws(tokens), do: tokens
end
# File: lib/vim_compiler/parser.ex
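# Example (sketch; assumes the lexer emits :op6 for "+" and :lt_number for
# integer literals):
#
#     VimCompiler.Parser.parse("def inc x = x + 1")
#
# registers a definition of `inc` with parameter pattern `x` and body
# %Ast.Term{lhs: %Ast.Name{text: "x"}, op: :+, rhs: %Ast.Number{value: 1}}.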
defmodule Util.HexParser do
@moduledoc """
This module defines some helper functions for playing around with raw memory from a ROM.
"""
use Bitwise
@block_size 16
def convert_file(input_file, output_file) do
output_file
|> File.open!([:write], handle_file(input_file))
end
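# Each output line describes one 16-byte block:
#
#     <bank> <offset>: <16 space-separated hex bytes> |<printable ASCII>|
#
# e.g. (illustrative) `00 01C0: 48 65 6C 6C 6F 00 ... |Hello...........|`.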
def calculate_checksum(input_file) do
checksum =
input_file
|> File.open!([:read, :binary], &read_and_sum(&1, 0))
inverse_checksum = bxor(checksum, 0xFFFF)
{format_byte(checksum, 4), format_byte(inverse_checksum, 4)}
end
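# Example (illustrative values): returns the 16-bit byte sum and its
# complement as zero-padded hex strings; 0xD1CA xor 0xFFFF == 0x2E35.
#
#     Util.HexParser.calculate_checksum("game.sfc")
#     #=> {"D1CA", "2E35"}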
defp read_and_sum(file_pid, curr_total) do
case IO.binread(file_pid, 1) do
:eof ->
curr_total &&& 0xFFFF
<<byte::size(8)>> ->
read_and_sum(file_pid, curr_total + byte)
end
end
defp handle_file(input_file) do
fn output_pid ->
input_file
|> File.open!([:read, :binary], &read_file(&1, output_pid, 0))
end
end
defp read_file(input_pid, output_pid, block_number) do
case IO.binread(input_pid, @block_size) do
:eof ->
:ok
block ->
formatted_block = (block_number * @block_size) |> format_result(block)
IO.write(output_pid, formatted_block)
read_file(input_pid, output_pid, block_number + 1)
end
end
defp format_result(index, block) do
[[format_index(index), ": "], format_block(block), ["\r\n"]]
end
defp format_block(<<
b0::size(8),
b1::size(8),
b2::size(8),
b3::size(8),
b4::size(8),
b5::size(8),
b6::size(8),
b7::size(8),
b8::size(8),
b9::size(8),
bA::size(8),
bB::size(8),
bC::size(8),
bD::size(8),
bE::size(8),
bF::size(8)
>>) do
fhex = &format_byte(&1, 2)
fbin = &format_printable_byte(&1)
hex = [
fhex.(b0),
" ",
fhex.(b1),
" ",
fhex.(b2),
" ",
fhex.(b3),
" ",
fhex.(b4),
" ",
fhex.(b5),
" ",
fhex.(b6),
" ",
fhex.(b7),
" ",
fhex.(b8),
" ",
fhex.(b9),
" ",
fhex.(bA),
" ",
fhex.(bB),
" ",
fhex.(bC),
" ",
fhex.(bD),
" ",
fhex.(bE),
" ",
fhex.(bF)
]
ascii = [
"|",
fbin.(b0),
fbin.(b1),
fbin.(b2),
fbin.(b3),
fbin.(b4),
fbin.(b5),
fbin.(b6),
fbin.(b7),
fbin.(b8),
fbin.(b9),
fbin.(bA),
fbin.(bB),
fbin.(bC),
fbin.(bD),
fbin.(bE),
fbin.(bF),
"|"
]
[hex, [" "], ascii]
end
defp format_byte(byte, length) do
byte
|> Integer.to_string(16)
|> String.pad_leading(length, "0")
end
# Printable ASCII spans 32 (space) through 126 ("~"); DEL (127) and all
# other bytes render as "." in the ASCII column.
defp format_printable_byte(byte) when byte >= 32 and byte <= 126, do: [byte]
defp format_printable_byte(_byte) do
"."
end
defp format_index(index) do
<<bank::binary-size(2), remainder::binary>> =
index
|> Integer.to_string(16)
|> String.pad_leading(6, "0")
bank <> " " <> remainder
end
end | lib/util/hex_parser.ex | 0.558207 | 0.463019 | hex_parser.ex | starcoder |
defmodule CSSEx.Helpers.Shared do
@moduledoc false
alias CSSEx.Helpers.Error
@appendable_first_char CSSEx.Helpers.SelectorChars.appendable_first_char()
@line_terminators CSSEx.Helpers.LineTerminators.code_points()
# increment the column count
def inc_col(data, amount \\ 1)
def inc_col(%{column: column, no_count: 0} = data, amount),
do: %{data | column: column + amount}
def inc_col(data, _), do: data
# increment the line and reset the column
def inc_line(data, amount \\ 1)
def inc_line(%{line: line, no_count: 0} = data, amount),
do: %{data | column: 0, line: line + amount}
def inc_line(data, _), do: data
def inc_no_count(%{no_count: no_count} = data, amount \\ 1) do
new_count =
case no_count + amount do
n when n >= 0 -> n
_ -> 0
end
%{data | no_count: new_count}
end
def generate_prefix(%{current_chain: cc, prefix: nil}), do: cc
def generate_prefix(%{current_chain: cc, prefix: prefix}), do: prefix ++ cc
# we only have one element but we do have a prefix, set the split chain to the prefix and reset the current_chain
def remove_last_from_chain(%{current_chain: [_], prefix: prefix} = data)
when not is_nil(prefix),
do: %{data | current_chain: [], split_chain: Enum.join(prefix, ",")}
# we only have one element so reset both chains
def remove_last_from_chain(%{current_chain: [_]} = data),
do: %{data | current_chain: [], split_chain: []}
# we have more than one
def remove_last_from_chain(%{current_chain: [_ | _] = chain, prefix: prefix} = data) do
[_ | new_chain] = :lists.reverse(chain)
new_chain_for_merge =
case prefix do
nil -> :lists.reverse(new_chain)
_ -> prefix ++ :lists.reverse(new_chain)
end
case split_chains(new_chain_for_merge) do
[_ | _] = splitted ->
%{data | current_chain: :lists.reverse(new_chain), split_chain: merge_split(splitted)}
{:error, error} ->
CSSEx.Parser.add_error(data, Error.error_msg(error))
end
end
def add_selector_to_chain(%{current_chain: cc, prefix: prefix} = data, selector) do
new_chain = [selector | :lists.reverse(cc)] |> :lists.reverse()
new_split =
case new_chain do
[_] when is_nil(prefix) -> [new_chain]
_ when is_nil(prefix) -> split_chains(new_chain)
_ -> split_chains(prefix ++ new_chain)
end
case new_split do
[_ | _] ->
%{data | current_chain: new_chain, split_chain: merge_split(new_split)}
{:error, error} ->
CSSEx.Parser.add_error(data, Error.error_msg(error))
end
end
def merge_split(split_chain) do
split_chain
|> Enum.map(fn chain -> Enum.join(chain, " ") end)
|> Enum.join(",")
end
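# Concatenates "&"-prefixed selectors onto their parent selector; mirroring the
# `split_chains/1` doc below, [".class_1", "&.class_3"] becomes [".class_1.class_3"],
# and a leading "&" with no parent selector is an error.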
def ampersand_join(initial), do: ampersand_join(initial, [])
def ampersand_join([<<"&", rem::binary>> | _], []),
do: throw({:error, {:invalid_parent_concat, rem}})
def ampersand_join([head, <<"&", rem::binary>> | t], acc) do
{new_head, joint} = check_head(head)
case is_trail_concat(rem) do
true ->
ampersand_join([new_head <> rem <> joint | t], acc)
false ->
case is_lead_concat(new_head) do
true ->
ampersand_join([rem <> new_head <> joint | t], acc)
false ->
throw({:error, {:invalid_component_concat, rem, head}})
end
end
end
def ampersand_join([head | t], acc) do
case Regex.split(~r/.?(?<amper>\(?&\)?).?$/, head,
include_captures: true,
on: [:amper],
trim: true
) do
[_] ->
ampersand_join(t, [acc | [head]])
[parent, "&"] ->
case :lists.reverse(acc) do
[previous | rem] ->
new_acc = :lists.reverse([previous, String.trim(parent) | rem])
ampersand_join(t, new_acc)
_ ->
throw({:error, {:invalid_parent_concat, parent}})
end
[pseudo, "(&)"] ->
case :lists.reverse(acc) do
[previous | rem] ->
new_acc = :lists.reverse(["#{pseudo}(#{previous})" | rem])
ampersand_join(t, new_acc)
_ ->
throw({:error, {:invalid_parent_concat, pseudo}})
end
end
end
def ampersand_join([], acc), do: :lists.flatten(acc)
def check_head(head) do
case Regex.split(~r/.?(?<amper>\(?&\)?).?$/, head,
include_captures: true,
on: [:amper],
trim: true
) do
[_] -> {head, ""}
[parent, "&"] -> {parent, "&"}
[pseudo, "(&)"] -> {pseudo, "(&)"}
end
end
Enum.each(@appendable_first_char, fn char ->
def is_trail_concat(<<unquote(char)::utf8, _::binary>>), do: true
end)
def is_trail_concat(_), do: false
Enum.each(@appendable_first_char, fn char ->
def is_lead_concat(<<unquote(char)::utf8, _::binary>>), do: true
end)
def is_lead_concat(_), do: false
@doc """
Produces a list of lists, where each inner list is a chain of selectors, representing all combinations between the selectors that need to occur when a "," comma is found.
If we have a cssex rule of:
.class_1, .class_2 {
&.class_3, .class_4 {
}
}
Then we have a chain that we can split as:
iex> split_chains([".class_1, .class_2", "&.class_3, .class_4"])
[
[".class_1", "&.class_3"],
[".class_1", ".class_4"],
[".class_2", "&.class_3"],
[".class_2", ".class_4"]
]
These then can be passed through `ampersand_join` in order to produce:
[
[".class_1.class_3"],
[".class_1", ".class_4"],
[".class_2.class_3"],
[".class_2", ".class_4"]
]
Then a list of final strings:
[
".class_1.class_3",
".class_1 .class_4",
".class_2.class_3",
".class_2 .class_4"
]
Which then can be joined together into a single css declaration:
".class_1.class_3, .class_1 .class_4, .class_2.class_3, .class_2 .class_4"
"""
@spec split_chains(list(String.t())) :: list(list(String.t()))
def split_chains(initial), do: split_chains(initial, [])
def split_chains([], acc) do
try do
Enum.map(acc, fn
chain when is_list(chain) ->
chain
|> :lists.flatten()
|> ampersand_join()
chain ->
[chain]
end)
catch
{:error, _} = error -> error
end
end
def split_chains([head | t], []) do
splits =
head
|> String.split(",")
|> Enum.map(fn el -> String.trim(el) end)
split_chains(t, splits)
end
def split_chains([head | t], acc) do
splits =
head
|> String.split(",")
|> Enum.map(fn el -> String.trim(el) end)
new_acc =
Enum.reduce(acc, [], fn chain, i_acc_1 ->
Enum.reduce(splits, i_acc_1, fn cur, i_acc_2 ->
[[[chain | [cur]] |> :lists.flatten()] | i_acc_2]
end)
end)
split_chains(t, new_acc)
end
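# Splits `text` (a charlist) on at most `n` top-level commas, ignoring commas
# nested inside parentheses; e.g. search_args_split('a(1,2),b,c', 1) gives
# ["a(1,2)", "b,c"].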
def search_args_split(text, n), do: search_args_split(text, n, 0, [], [])
def search_args_split([], _, _, acc, full_acc) do
final_full_acc =
case IO.chardata_to_string(acc) |> String.trim() do
"" -> full_acc
final_arg -> [final_arg | full_acc]
end
:lists.reverse(final_full_acc)
end
def search_args_split([char | rem], 0, levels, acc, full_acc) do
search_args_split(rem, 0, levels, [acc, char], full_acc)
end
def search_args_split([?) | rem], n, levels, acc, full_acc)
when levels > 0 and n > 0 do
search_args_split(rem, n, levels - 1, [acc, ?)], full_acc)
end
def search_args_split([?( | rem], n, levels, acc, full_acc),
do: search_args_split(rem, n, levels + 1, [acc, ?(], full_acc)
def search_args_split([?, | rem], n, 0, acc, full_acc),
do: search_args_split(rem, n - 1, 0, [], [IO.chardata_to_string(acc) | full_acc])
def search_args_split([char | rem], n, levels, acc, full_acc) do
search_args_split(rem, n, levels, [acc, char], full_acc)
end
def search_for(content, target), do: search_for(content, target, [])
Enum.each(['{', ';'], fn chars ->
def search_for(unquote(chars) ++ rem, unquote(chars), acc), do: {:ok, {rem, acc}}
end)
def search_for([char | rem], chars, acc), do: search_for(rem, chars, [acc | [char]])
def search_for([], _, acc), do: {:error, {[], acc}}
def block_search([125 | _rem], 1, acc), do: {:ok, acc}
def block_search([125 | rem], n, acc), do: block_search(rem, n - 1, [acc, "}"])
def block_search([123 | rem], n, acc), do: block_search(rem, n + 1, [acc, "{"])
def block_search([char | rem], n, acc), do: block_search(rem, n, [acc, char])
def block_search([], _, _acc), do: {:error, {:block_search, :no_closing}}
def valid_attribute_kv?(key, val)
when is_binary(key) and
is_binary(val) and
byte_size(key) > 0 and
byte_size(val) > 0,
do: true
def valid_attribute_kv?(_, _), do: false
def calc_line_offset(eex_lines, final) do
lines =
for <<char <- final>>, <<char>> in @line_terminators, reduce: 0 do
acc -> acc + 1
end
eex_lines - lines
end
def file_and_line_opts(%{file: nil, line: line}), do: [line: line || 0]
def file_and_line_opts(%{file: file, line: line}),
do: [file: file, line: line || 0]
end | lib/helpers/shared.ex | 0.700792 | 0.540742 | shared.ex | starcoder |
defmodule ChexDigits.Rule do
@moduledoc """
`%Rule{
digits: list,
input_alphabet: list,
output_alphabet: list,
module: integer | nil,
module_type: atom,
weights: list | non_neg_integer,
weight_alignment: atom,
per_term_function: function
}`
`digits`: a List of digits for which the checksum will be calculated
`module`: the module to calculate the final remainder.
If `nil`, the number will be returned untouched
`module_type`: one of:
- `:standard`: the checksum module will be: `rem(sum, module)`
- `:module_minus`: the checksum module will be: `module - rem(sum, module)`
`weights`:
The enumerable that contains the weights to be used when calculating the checksum.
Can also be a number.
For `input_alphabet` and `output_alphabet`, if a digit is not specified, it is returned unchanged
`input_alphabet`:
A map that translates the `digits` list into a numerical representation
e.g.: %{"0" => 0, "1" => 1, "X" => 10}
`output_alphabet`:
A map that translates the calculated check digit into a string representation:
e.g.:
check_digit = 11 - rem(1 * 7 + 2 * 6 + 3 * 5, 11)
# Here, check_digit == 10. However, we need the result to conform to a single character.
# Therefore, we use the following output_alphabet, which maps each digit 0 - 9 onto its own character
# and the number `10` is mapped onto the character `"X"`.
output_alphabet = %{10 => "X"}
`per_term_function`:
An optional argument that specifies the function to be applied to the result of each multiplication in the weighted sum.
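## Example

A minimal sketch (the digits and weights below are illustrative, not taken
from any real checksum standard):

    # with :module_minus the check digit is module - rem(weighted_sum, module)
    rule =
      ChexDigits.Rule.new(
        "23794",         # digits to checksum
        %{},             # input alphabet: every digit maps to itself
        %{10 => "X"},    # output alphabet: a result of 10 is rendered as "X"
        11,              # module
        :module_minus,
        [2, 3, 4, 5, 6]  # weights
      )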
"""
alias ChexDigits.Helper, as: H
defstruct [
:digits,
:input_alphabet,
:output_alphabet,
:module,
:module_type,
:weights,
:weight_alignment,
:per_term_function
]
@spec new(
List.t() | String.t(),
Map.t(),
Map.t(),
non_neg_integer,
atom,
List.t() | non_neg_integer,
atom,
function
) :: %__MODULE__{}
def new(
digits,
input_alphabet,
output_alphabet,
module,
module_type,
weights,
weight_alignment \\ :right,
per_term_function \\ & &1
) do
digits = H.to_list(digits)
weights = H.to_list(weights)

%__MODULE__{
  digits: digits,
  input_alphabet: input_alphabet,
  output_alphabet: output_alphabet,
  module: module,
  module_type: module_type,
  weights: weights,
  weight_alignment: weight_alignment,
  per_term_function: per_term_function
}
end
end | lib/rule.ex | 0.884688 | 0.843251 | rule.ex | starcoder |
defmodule SteamEx.ISteamEconomy do
@moduledoc """
A secondary interface to interact with the [Steam Economy](https://partner.steamgames.com/doc/features/inventory/economy).
See also: [IGameInventory](https://partner.steamgames.com/doc/webapi/IGameInventory).
For more info on how to use the Steamworks Web API please see the [Web API Overview](https://partner.steamgames.com/doc/webapi_overview).
"""
import SteamEx.API.Base
@interface "ISteamEconomy"
@doc """
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key.|
| appid | uint32 | ✔ | Must be a steam economy app.|
| language | string | | The user's local language|
| class_count | uint32 | ✔ | Number of classes requested. Must be at least one.|
| classid0 | uint64 | ✔ | Class ID of the nth class.|
| instanceid0 | uint64 | | Instance ID of the nth class.|
See other: [https://partner.steamgames.com/doc/webapi/ISteamEconomy#GetAssetClassInfo](https://partner.steamgames.com/doc/webapi/ISteamEconomy#GetAssetClassInfo)
"""
def get_asset_class_info(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/GetAssetClassInfo/v1/", access_key, params, headers)
end
@doc """
Returns prices and categories for items that users are able to purchase.
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key.|
| appid | uint32 | ✔ | Must be a steam economy app.|
| currency | string | | The currency to filter for|
| language | string | | The user's local language|
See other: [https://partner.steamgames.com/doc/webapi/ISteamEconomy#GetAssetPrices](https://partner.steamgames.com/doc/webapi/ISteamEconomy#GetAssetPrices)
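## Example

A sketch with illustrative values; the `appid` below is only a placeholder,
and the params follow the table above:

    SteamEx.ISteamEconomy.get_asset_prices(access_key, %{appid: 730, currency: "USD"})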
"""
def get_asset_prices(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/GetAssetPrices/v1/", access_key, params, headers)
end
end | lib/interfaces/i_steam_economy.ex | 0.685107 | 0.550849 | i_steam_economy.ex | starcoder |
defmodule RDF.XSD.Boolean do
@moduledoc """
`RDF.XSD.Datatype` for XSD booleans.
"""
@type valid_value :: boolean
@type input_value :: RDF.Literal.t() | valid_value | number | String.t() | any
use RDF.XSD.Datatype.Primitive,
name: "boolean",
id: RDF.Utils.Bootstrapping.xsd_iri("boolean")
alias RDF.XSD
def_applicable_facet XSD.Facets.Pattern
@doc false
def pattern_conform?(pattern, _value, lexical) do
XSD.Facets.Pattern.conform?(pattern, lexical)
end
@impl XSD.Datatype
def lexical_mapping(lexical, _) do
cond do
  lexical in ~W[true 1] -> true
  lexical in ~W[false 0] -> false
  true -> @invalid_value
end
end
@impl XSD.Datatype
@spec elixir_mapping(valid_value | integer | any, Keyword.t()) :: value
def elixir_mapping(value, _)
def elixir_mapping(value, _) when is_boolean(value), do: value
def elixir_mapping(1, _), do: true
def elixir_mapping(0, _), do: false
def elixir_mapping(_, _), do: @invalid_value
@impl RDF.Literal.Datatype
def do_cast(value)
def do_cast(%XSD.String{} = xsd_string) do
xsd_string.value |> new() |> canonical()
end
def do_cast(literal) do
cond do
XSD.Decimal.datatype?(literal) ->
!Decimal.equal?(literal.value, 0) |> new()
XSD.Numeric.datatype?(literal) ->
new(literal.value not in [0, 0.0, :nan])
true ->
super(literal)
end
end
@doc """
Returns an Effective Boolean Value (EBV).
The Effective Boolean Value is an algorithm to coerce values to a `RDF.XSD.Boolean`.
It is specified and used in the SPARQL query language and is based upon XPath's
`fn:boolean`. Unlike in those specs, any value which cannot be
converted into a boolean results in `nil` here.
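## Examples
For illustration (these mirror the `fn_not/1` examples below, which negate the EBV):
    iex> RDF.XSD.Boolean.ebv(true)
    RDF.XSD.true
    iex> RDF.XSD.Boolean.ebv(42)
    RDF.XSD.true
    iex> RDF.XSD.Boolean.ebv("")
    RDF.XSD.false
    iex> RDF.XSD.Boolean.ebv(nil)
    nil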
see
- <https://www.w3.org/TR/xpath-31/#id-ebv>
- <https://www.w3.org/TR/sparql11-query/#ebv>
"""
@spec ebv(input_value) :: RDF.Literal.t() | nil
def ebv(value)
def ebv(%RDF.Literal{literal: literal}), do: ebv(literal)
def ebv(true), do: XSD.Boolean.Value.true()
def ebv(false), do: XSD.Boolean.Value.false()
def ebv(%__MODULE__{value: nil}), do: XSD.Boolean.Value.false()
def ebv(%__MODULE__{} = value), do: literal(value)
def ebv(%XSD.String{} = string) do
if String.length(string.value) == 0,
do: XSD.Boolean.Value.false(),
else: XSD.Boolean.Value.true()
end
def ebv(%datatype{} = literal) do
if XSD.Numeric.datatype?(datatype) do
if datatype.valid?(literal) and
not (datatype.value(literal) == 0 or datatype.value(literal) == :nan),
do: XSD.Boolean.Value.true(),
else: XSD.Boolean.Value.false()
end
end
def ebv(value) when is_binary(value) or is_number(value) do
value |> RDF.Literal.coerce() |> ebv()
end
def ebv(_), do: nil
@doc """
Alias for `ebv/1`.
"""
@spec effective(input_value) :: RDF.Literal.t() | nil
def effective(value), do: ebv(value)
@doc """
Returns `RDF.XSD.true` if the effective boolean value of the given argument is `RDF.XSD.false`, or `RDF.XSD.false` if it is `RDF.XSD.true`.
Otherwise it returns `nil`.
## Examples
iex> RDF.XSD.Boolean.fn_not(RDF.XSD.true)
RDF.XSD.false
iex> RDF.XSD.Boolean.fn_not(RDF.XSD.false)
RDF.XSD.true
iex> RDF.XSD.Boolean.fn_not(true)
RDF.XSD.false
iex> RDF.XSD.Boolean.fn_not(false)
RDF.XSD.true
iex> RDF.XSD.Boolean.fn_not(42)
RDF.XSD.false
iex> RDF.XSD.Boolean.fn_not("")
RDF.XSD.true
iex> RDF.XSD.Boolean.fn_not(nil)
nil
see <https://www.w3.org/TR/xpath-functions/#func-not>
"""
@spec fn_not(input_value) :: t() | nil
def fn_not(value)
def fn_not(%RDF.Literal{literal: literal}), do: fn_not(literal)
def fn_not(value) do
case ebv(value) do
%RDF.Literal{literal: %__MODULE__{value: true}} -> XSD.Boolean.Value.false()
%RDF.Literal{literal: %__MODULE__{value: false}} -> XSD.Boolean.Value.true()
nil -> nil
end
end
@doc """
Returns the logical `AND` of the effective boolean value of the given arguments.
It returns `nil` if only one argument is `nil` and the other argument is
`RDF.XSD.true` and `RDF.XSD.false` if the other argument is `RDF.XSD.false`.
## Examples
iex> RDF.XSD.Boolean.logical_and(RDF.XSD.true, RDF.XSD.true)
RDF.XSD.true
iex> RDF.XSD.Boolean.logical_and(RDF.XSD.true, RDF.XSD.false)
RDF.XSD.false
iex> RDF.XSD.Boolean.logical_and(RDF.XSD.true, nil)
nil
iex> RDF.XSD.Boolean.logical_and(nil, RDF.XSD.false)
RDF.XSD.false
iex> RDF.XSD.Boolean.logical_and(nil, nil)
nil
see <https://www.w3.org/TR/sparql11-query/#func-logical-and>
"""
@spec logical_and(input_value, input_value) :: t() | nil
def logical_and(left, right)
def logical_and(%RDF.Literal{literal: left}, right), do: logical_and(left, right)
def logical_and(left, %RDF.Literal{literal: right}), do: logical_and(left, right)
def logical_and(left, right) do
case ebv(left) do
%RDF.Literal{literal: %__MODULE__{value: false}} ->
XSD.Boolean.Value.false()
%RDF.Literal{literal: %__MODULE__{value: true}} ->
case ebv(right) do
%RDF.Literal{literal: %__MODULE__{value: true}} -> XSD.Boolean.Value.true()
%RDF.Literal{literal: %__MODULE__{value: false}} -> XSD.Boolean.Value.false()
nil -> nil
end
nil ->
if match?(%RDF.Literal{literal: %__MODULE__{value: false}}, ebv(right)) do
XSD.Boolean.Value.false()
end
end
end
@doc """
Returns the logical `OR` of the effective boolean value of the given arguments.
It returns `nil` if only one argument is `nil` and the other argument is
`RDF.XSD.false` and `RDF.XSD.true` if the other argument is `RDF.XSD.true`.
## Examples
iex> RDF.XSD.Boolean.logical_or(RDF.XSD.true, RDF.XSD.false)
RDF.XSD.true
iex> RDF.XSD.Boolean.logical_or(RDF.XSD.false, RDF.XSD.false)
RDF.XSD.false
iex> RDF.XSD.Boolean.logical_or(RDF.XSD.true, nil)
RDF.XSD.true
iex> RDF.XSD.Boolean.logical_or(nil, RDF.XSD.false)
nil
iex> RDF.XSD.Boolean.logical_or(nil, nil)
nil
see <https://www.w3.org/TR/sparql11-query/#func-logical-or>
"""
@spec logical_or(input_value, input_value) :: t() | nil
def logical_or(left, right)
def logical_or(%RDF.Literal{literal: left}, right), do: logical_or(left, right)
def logical_or(left, %RDF.Literal{literal: right}), do: logical_or(left, right)
def logical_or(left, right) do
case ebv(left) do
%RDF.Literal{literal: %__MODULE__{value: true}} ->
XSD.Boolean.Value.true()
%RDF.Literal{literal: %__MODULE__{value: false}} ->
case ebv(right) do
%RDF.Literal{literal: %__MODULE__{value: true}} -> XSD.Boolean.Value.true()
%RDF.Literal{literal: %__MODULE__{value: false}} -> XSD.Boolean.Value.false()
nil -> nil
end
nil ->
if match?(%RDF.Literal{literal: %__MODULE__{value: true}}, ebv(right)) do
XSD.Boolean.Value.true()
end
end
end
end | lib/rdf/xsd/datatypes/boolean.ex | 0.897863 | 0.603406 | boolean.ex | starcoder |
defprotocol Timber.Eventable do
@moduledoc """
Converts a data structure into a `Timber.Event.t`. This is called on any data structure
supplied in the `:event` metadata key passed to `Logger`.
For example, this protocol is how we're able to support maps:
```elixir
event_data = %{customer_id: "xiaus1934", amount: 1900, currency: "USD"}
Logger.info "Payment rejected", event: event_data
```
This is achieved by:
```elixir
defimpl Timber.Eventable, for: Map do
def to_event(%{type: type, data: data}) do
%Timber.Events.CustomEvent{
type: type,
data: data
}
end
end
```
## What about custom events and structs?
We recommend defining a struct and calling `use Timber.Events.CustomEvent` in that module.
This takes care of everything automatically. See `Timber.Events.CustomEvent` for examples.
"""
@fallback_to_any true
@doc """
Converts the data structure into a `Timber.Event.t`.
"""
@spec to_event(any) :: Timber.Event.t
def to_event(data)
end
defimpl Timber.Eventable, for: Timber.Events.ChannelJoinEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.ChannelReceiveEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.ControllerCallEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.CustomEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.ErrorEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.HTTPRequestEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.HTTPResponseEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.SQLQueryEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Timber.Events.TemplateRenderEvent do
def to_event(event), do: event
end
defimpl Timber.Eventable, for: Map do
def to_event(%{type: type, data: data}) do
%Timber.Events.CustomEvent{
type: type,
data: data
}
end
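# A one-key map is treated as %{event_type => event_data},
# e.g. %{payment_rejected: %{customer_id: "xiaus1934", amount: 1900}}.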
def to_event(map) when map_size(map) == 1 do
[type] = Map.keys(map)
[data] = Map.values(map)
%Timber.Events.CustomEvent{
type: type,
data: data
}
end
end
defimpl Timber.Eventable, for: Any do
def to_event(%{__exception__: true} = error) do
Timber.Events.ErrorEvent.from_exception(error)
end
end | lib/timber/eventable.ex | 0.923782 | 0.892281 | eventable.ex | starcoder |
defmodule Rummage.Ecto.Hook.Paginate do
@moduledoc """
`Rummage.Ecto.Hook.Paginate` is the default pagination hook that comes with
`Rummage.Ecto`.
This module provides a operations that can add pagination functionality to
a pipeline of `Ecto` queries. This module works by taking a `per_page`, which
it uses to add a `limit` to the query and by setting the `offset` using the
`page` variable, which signifies the current page of entries to be displayed.
NOTE: This module doesn't return a list of entries, but a `Ecto.Query.t`.
This module `uses` `Rummage.Ecto.Hook`.
_____________________________________________________________________________
# ABOUT:
## Arguments:
This Hook expects a `queryable` (an `Ecto.Queryable`) and
`paginate_params` (a `Map`). The map should be in the format:
`%{per_page: 10, page: 1}`
Details:
* `per_page`: Specifies the entries in each page.
* `page`: Specifies the `page` number.
For example, if we want to paginate products, we would
do the following:
```elixir
Rummage.Ecto.Hook.Paginate.run(Product, %{per_page: 10, page: 1})
```
_____________________________________________________________________________
# ASSUMPTIONS/NOTES:
None. This hook should work for all `Schema` types: whether or not the schema
has a primary_key, that case is handled.
_____________________________________________________________________________
## USAGE:
To add pagination to a `Ecto.Queryable`, simply do the following:
```ex
Rummage.Ecto.Hook.Paginate.run(queryable, %{per_page: 10, page: 2})
```
## Overriding:
This module can be overridden with a custom module while using `Rummage.Ecto`
in `Ecto` struct module.
In the `Ecto` module:
```elixir
Rummage.Ecto.rummage(queryable, rummage, paginate: CustomHook)
```
OR
Globally for all models in `config.exs`:
```elixir
config :rummage_ecto,
Rummage.Ecto,
paginate: CustomHook
```
The `CustomHook` must use `Rummage.Ecto.Hook`. For examples of `CustomHook`,
check out some `custom_hooks` that are shipped with `Rummage.Ecto`:
`Rummage.Ecto.CustomHook.SimpleSearch`, `Rummage.Ecto.CustomHook.SimpleSort`,
Rummage.Ecto.CustomHook.SimplePaginate
"""
use Rummage.Ecto.Hook
import Ecto.Query
@expected_keys ~w{per_page page}a
@err_msg ~s{Error in params, No values given for keys: }
@per_page 10
@doc """
This is the callback implementation of Rummage.Ecto.Hook.run/2.
Builds a paginate `Ecto.Query.t` on top of a given `Ecto.Query.t` variable
with given `params`.
Besides an `Ecto.Query.t` an `Ecto.Schema` module can also be passed as it
implements `Ecto.Queryable`
Params is a `Map` which is expected to have the keys `#{Enum.join(@expected_keys, ", ")}`.
If an expected key isn't given, a `Runtime Error` is raised.
## Examples
When an empty map is passed as `params`:
iex> alias Rummage.Ecto.Hook.Paginate
iex> import Ecto.Query
iex> Paginate.run(Parent, %{})
** (RuntimeError) Error in params, No values given for keys: per_page, page
When a non-empty map is passed as `params`, but with a missing key:
iex> alias Rummage.Ecto.Hook.Paginate
iex> import Ecto.Query
iex> Paginate.run(Parent, %{per_page: 10})
** (RuntimeError) Error in params, No values given for keys: page
When a valid map of params is passed with an `Ecto.Schema` module:
iex> alias Rummage.Ecto.Hook.Paginate
iex> import Ecto.Query
iex> Paginate.run(Rummage.Ecto.Product, %{per_page: 10, page: 1})
#Ecto.Query<from p0 in Rummage.Ecto.Product, limit: ^10, offset: ^0>
When the `queryable` passed is an `Ecto.Query` variable:
iex> alias Rummage.Ecto.Hook.Paginate
iex> import Ecto.Query
iex> queryable = from u in "products"
#Ecto.Query<from p0 in "products">
iex> Paginate.run(queryable, %{per_page: 10, page: 2})
#Ecto.Query<from p0 in "products", limit: ^10, offset: ^10>
More examples:
iex> alias Rummage.Ecto.Hook.Paginate
iex> import Ecto.Query
iex> rummage = %{per_page: 1, page: 1}
iex> queryable = from u in "products"
#Ecto.Query<from p0 in "products">
iex> Paginate.run(queryable, rummage)
#Ecto.Query<from p0 in "products", limit: ^1, offset: ^0>
iex> alias Rummage.Ecto.Hook.Paginate
iex> import Ecto.Query
iex> rummage = %{per_page: 5, page: 2}
iex> queryable = from u in "products"
#Ecto.Query<from p0 in "products">
iex> Paginate.run(queryable, rummage)
#Ecto.Query<from p0 in "products", limit: ^5, offset: ^5>
"""
@spec run(Ecto.Query.t(), map()) :: Ecto.Query.t()
def run(queryable, paginate_params) do
:ok = validate_params(paginate_params)
handle_paginate(queryable, paginate_params)
end
# Helper function which handles addition of paginated query on top of
# the sent queryable variable
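# e.g. per_page: 10, page: 2 -> limit 10, offset 10 (i.e. entries 11..20)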
defp handle_paginate(queryable, paginate_params) do
per_page = Map.get(paginate_params, :per_page)
page = Map.get(paginate_params, :page)
offset = per_page * (page - 1)
queryable
|> limit(^per_page)
|> offset(^offset)
end
# Helper function that validates the list of params based on
# @expected_keys list
defp validate_params(params) do
key_validations = Enum.map(@expected_keys, &Map.fetch(params, &1))
case Enum.filter(key_validations, &(&1 == :error)) do
[] -> :ok
_ -> raise @err_msg <> missing_keys(key_validations)
end
end
# Helper function used to build error message using missing keys
defp missing_keys(key_validations) do
key_validations
|> Enum.with_index()
|> Enum.filter(fn {v, _i} -> v == :error end)
|> Enum.map(fn {_v, i} -> Enum.at(@expected_keys, i) end)
|> Enum.map(&to_string/1)
|> Enum.join(", ")
end
@doc """
Callback implementation for Rummage.Ecto.Hook.format_params/3.
This function takes an `Ecto.Query.t` or `queryable`, `paginate_params` which
will be passed to the `run/2` function, but also takes a list of options,
`opts`.
The function expects `opts` to include a `repo` key which points to the
`Ecto.Repo` which will be used to calculate the `total_count` and `max_page`
for this paginate hook module.
## Examples
When a `repo` isn't passed in `opts` it gives an error:
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Category
iex> Paginate.format_params(Category, %{per_page: 1, page: 1}, [])
** (RuntimeError) Expected key `repo` in `opts`, got []
When `paginate_params` given aren't valid, it uses defaults to populate params:
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Category
iex> Ecto.Adapters.SQL.Sandbox.checkout(Rummage.Ecto.Repo)
iex> Paginate.format_params(Category, %{}, [repo: Rummage.Ecto.Repo])
%{max_page: 0, page: 1, per_page: 10, total_count: 0}
When `paginate_params` and `opts` given are valid:
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Category
iex> paginate_params = %{
...> per_page: 1,
...> page: 1
...> }
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> Paginate.format_params(Category, paginate_params, [repo: repo])
%{max_page: 0, page: 1, per_page: 1, total_count: 0}
When `paginate_params` and `opts` given are valid:
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Category
iex> paginate_params = %{
...> per_page: 1,
...> page: 1
...> }
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> repo.insert!(%Category{name: "name"})
iex> repo.insert!(%Category{name: "name2"})
iex> Paginate.format_params(Category, paginate_params, [repo: repo])
%{max_page: 2, page: 1, per_page: 1, total_count: 2}
When `paginate_params` and `opts` given are valid and when the `queryable`
passed has a `primary_key` defaulted to `id`.
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Category
iex> paginate_params = %{
...> per_page: 1,
...> page: 1
...> }
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> repo.insert!(%Category{name: "name"})
iex> repo.insert!(%Category{name: "name2"})
iex> Paginate.format_params(Category, paginate_params, [repo: repo])
%{max_page: 2, page: 1, per_page: 1, total_count: 2}
When `paginate_params` and `opts` given are valid and when the `queryable`
passed has a custom `primary_key`.
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Product
iex> paginate_params = %{
...> per_page: 1,
...> page: 1
...> }
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> repo.insert!(%Product{internal_code: "100"})
iex> repo.insert!(%Product{internal_code: "101"})
iex> Paginate.format_params(Product, paginate_params, [repo: repo])
%{max_page: 2, page: 1, per_page: 1, total_count: 2}
When `paginate_params` and `opts` given are valid and when the `queryable`
passed has a custom `primary_key`.
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Employee
iex> paginate_params = %{
...> per_page: 1,
...> page: 1
...> }
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> repo.insert!(%Employee{first_name: "First"})
iex> repo.insert!(%Employee{first_name: "Second"})
iex> Paginate.format_params(Employee, paginate_params, [repo: repo])
%{max_page: 2, page: 1, per_page: 1, total_count: 2}
When `paginate_params` and `opts` given are valid and when the `queryable`
passed is not a `Ecto.Schema` module, but an `Ecto.Query.t`.
iex> alias Rummage.Ecto.Hook.Paginate
iex> alias Rummage.Ecto.Employee
iex> paginate_params = %{
...> per_page: 1,
...> page: 1
...> }
iex> repo = Rummage.Ecto.Repo
iex> Ecto.Adapters.SQL.Sandbox.checkout(repo)
iex> repo.insert!(%Employee{first_name: "First"})
iex> repo.insert!(%Employee{first_name: "Second"})
iex> import Ecto.Query
iex> queryable = from u in Employee, where: u.first_name == "First"
iex> Paginate.format_params(queryable, paginate_params, [repo: repo])
%{max_page: 1, page: 1, per_page: 1, total_count: 1}
"""
@spec format_params(Ecto.Query.t(), map() | atom(), keyword()) :: map()
def format_params(queryable, {paginate_scope, page}, opts) do
module = get_module(queryable)
name = :"__rummage_paginate_#{paginate_scope}"
paginate_params =
case function_exported?(module, name, 1) do
true -> apply(module, name, [page])
_ -> raise "No scope `#{paginate_scope}` of type paginate defined in the #{module}"
end
format_params(queryable, paginate_params, opts)
end
def format_params(queryable, paginate_params, opts) do
paginate_params = populate_params(paginate_params, opts)
case Keyword.get(opts, :repo) do
nil -> raise "Expected key `repo` in `opts`, got #{inspect(opts)}"
repo -> get_params(queryable, paginate_params, repo)
end
end
# Helper function that populate the list of params based on
# @expected_keys list
defp populate_params(params, opts) do
params
|> Map.put_new(:per_page, Keyword.get(opts, :per_page, @per_page))
|> Map.put_new(:page, 1)
end
# Helper function which gets formatted list of params including
# page, per_page, total_count and max_page keys
defp get_params(queryable, paginate_params, repo) do
  per_page = Map.get(paginate_params, :per_page)
  total_count = get_total_count(queryable, repo)

  # ceil(total_count / per_page), e.g. 2 entries at 1 per page -> max_page: 2
  max_page =
    (total_count / per_page)
    |> Float.ceil()
    |> round()

  %{
    page: Map.get(paginate_params, :page),
    per_page: per_page,
    total_count: total_count,
    max_page: max_page
  }
end
# Helper function which gets total count of a queryable based on
# the given repo.
# This excludes operations such as select, preload and order_by
# to make the query more effectient
defp get_total_count(queryable, repo) do
queryable
|> exclude(:select)
|> exclude(:preload)
|> exclude(:order_by)
|> get_count(repo, pk(queryable))
end
# This function gets count of a query and repo passed.
# When primary key passed is nil, it just gets all the elements
# and counts them, but when a primary key is passed it just counts
# the distinct primary keys
defp get_count(query, repo, nil) do
repo
|> apply(:all, [distinct(query, true)])
|> Enum.count()
end
defp get_count(query, repo, pk) do
query = select(query, [s], count(field(s, ^pk), :distinct))
case apply(repo, :all, [query]) do
[hd | _] -> hd
_ -> 0
end
end
# Helper function which returns the primary key associated with a
# Queryable.
defp pk(queryable) do
schema =
queryable
|> Ecto.Queryable.to_query()
|> Rummage.Ecto.QueryUtils.schema_from_query()
case schema.__schema__(:primary_key) do
[] -> nil
list -> hd(list)
end
end
end | lib/rummage_ecto/hooks/paginate.ex | 0.797281 | 0.857828 | paginate.ex | starcoder |
defmodule Muster.Game do
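@moduledoc """
A two-player, turn-based take on a 2048-style tile-merging game: players
alternate moves, the game is won when a 2048 tile appears on the grid and
lost when the grid runs out of spaces.
"""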
alias Muster.Game.{Grid, Tile}
@type status :: :waiting_for_players | :on | :won | :lost | :stopped
@type player :: :player1 | :player2
@type t :: %__MODULE__{
status: status(),
players: [player()],
current_player: player() | nil,
grid: Grid.t(),
next_id: Tile.id(),
merged_tiles: [Tile.t()]
}
@type direction :: :left | :right | :up | :down
@enforce_keys [:status, :grid]
defstruct status: nil, grid: nil, next_id: 1, merged_tiles: [], players: [], current_player: nil
@first_tile 2
@new_tile 1
@winning_tile 2048
@number_of_players 2
@spec new() :: t()
def new() do
{grid, next_id} =
Grid.new()
|> Grid.put_tile_in_random_space(@first_tile)
|> Grid.put_ids(1)
%__MODULE__{status: :waiting_for_players, grid: grid, next_id: next_id}
end
@spec add_player(t(), player()) :: {:ok, t()} | {:error, :game_is_on}
def add_player(%__MODULE__{status: :waiting_for_players} = game, player) do
game = %{game | players: game.players ++ [player]}
game =
if length(game.players) == @number_of_players do
%{game | status: :on, current_player: hd(game.players)}
else
game
end
{:ok, game}
end
def add_player(%__MODULE__{}, _), do: {:error, :game_is_on}
@spec move(t(), player(), direction()) :: {:ok, t()} | {:error, :player_cant_move}
def move(%__MODULE__{status: :on, current_player: player} = game, player, direction) do
game =
game
|> move_tiles(direction)
|> check_win()
|> check_loss()
|> maybe_add_new_tile()
|> maybe_toggle_player()
{:ok, game}
end
def move(%__MODULE__{}, _, _), do: {:error, :player_cant_move}
defp move_tiles(game, direction) do
{grid, next_id} =
game.grid
|> Grid.move_tiles(direction)
|> Grid.put_ids(game.next_id)
tile_ids = Enum.map(grid, & &1.id)
merged_tiles = Enum.filter(game.grid, &(&1.id not in tile_ids))
%{game | grid: grid, next_id: next_id, merged_tiles: game.merged_tiles ++ merged_tiles}
end
defp check_win(game) do
if Grid.tile_present?(game.grid, @winning_tile) do
%{game | status: :won}
else
game
end
end
defp check_loss(game) do
if game.status == :on && Grid.count_spaces(game.grid) == 0 do
%{game | status: :lost}
else
game
end
end
defp maybe_add_new_tile(game) do
if game.status == :on do
{grid, next_id} =
game.grid
|> Grid.put_tile_in_random_space(@new_tile)
|> Grid.put_ids(game.next_id)
%{game | grid: grid, next_id: next_id}
else
game
end
end
defp maybe_toggle_player(game) do
if game.status == :on do
%{game | current_player: Enum.find(game.players, &(&1 != game.current_player))}
else
game
end
end
@spec stop(t()) :: t()
def stop(%__MODULE__{} = game) do
%{game | status: :stopped}
end
@spec ended?(t()) :: boolean()
def ended?(%__MODULE__{status: status}) do
status in ~w(won lost stopped)a
end
end | apps/muster/lib/muster/game.ex | 0.751557 | 0.468608 | game.ex | starcoder |
defmodule Cluster.Strategy.Kubernetes.DNSSRV do
@moduledoc """
This clustering strategy works by issuing an SRV query for the kubernetes headless service
under which the stateful set containing your nodes is running.
For more information, see the kubernetes stateful-application [documentation](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#using-stable-network-identities)
* It will fetch the FQDN of all pods under the headless service and attempt to connect.
* It will continually monitor and update its connections according to the polling_interval (default 5s)
The `application_name` is configurable (you may have launched erlang with a different configured name),
but will in most cases be the name of your application
An example configuration is below:
config :libcluster,
topologies: [
k8s_example: [
strategy: #{__MODULE__},
config: [
service: "elixir-plug-poc",
application_name: "elixir_plug_poc",
namespace: "default",
polling_interval: 10_000]]]
An example of how this strategy extracts topology information from DNS follows:
```
bash-5.0# hostname -f
elixir-plug-poc-1.elixir-plug-poc.default.svc.cluster.local
bash-5.0# dig SRV elixir-plug-poc.default.svc.cluster.local
; <<>> DiG 9.14.3 <<>> SRV elixir-plug-poc.default.svc.cluster.local
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 7169
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 2
;; QUESTION SECTION:
;elixir-plug-poc.default.svc.cluster.local. IN SRV
;; ANSWER SECTION:
elixir-plug-poc.default.svc.cluster.local. 30 IN SRV 10 50 0 elixir-plug-poc-0.elixir-plug-poc.default.svc.cluster.local.
elixir-plug-poc.default.svc.cluster.local. 30 IN SRV 10 50 0 elixir-plug-poc-1.elixir-plug-poc.default.svc.cluster.local.
;; ADDITIONAL SECTION:
elixir-plug-poc-0.elixir-plug-poc.default.svc.cluster.local. 30 IN A 10.1.0.95
elixir-plug-poc-1.elixir-plug-poc.default.svc.cluster.local. 30 IN A 10.1.0.96
;; Query time: 0 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Wed Jul 03 11:55:27 UTC 2019
;; MSG SIZE rcvd: 167
```
And here is an example of a corresponding kubernetes statefulset/service definition:
```yaml
apiVersion: v1
kind: Service
metadata:
name: elixir-plug-poc
labels:
app: elixir-plug-poc
spec:
ports:
- port: 4000
name: web
clusterIP: None
selector:
app: elixir-plug-poc
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elixir-plug-poc
spec:
serviceName: "elixir-plug-poc"
replicas: 2
selector:
matchLabels:
app: elixir-plug-poc
template:
metadata:
labels:
app: elixir-plug-poc
spec:
containers:
- name: elixir-plug-poc
image: binarytemple/elixir_plug_poc
args:
- foreground
env:
- name: ERLANG_COOKIE
value: "cookie"
imagePullPolicy: Always
ports:
- containerPort: 4000
name: http
protocol: TCP
```
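For tests, a stub `resolver` can also be injected via `config` (a sketch; it
must return the same shape as `:inet_res.getbyname(name, :srv)`, and the
hostnames below are illustrative):

```elixir
config: [
  service: "elixir-plug-poc",
  application_name: "elixir_plug_poc",
  namespace: "default",
  resolver: fn _name ->
    {:ok,
     {:hostent, 'elixir-plug-poc.default.svc.cluster.local.', [], :srv, 1,
      [{10, 50, 0, 'elixir-plug-poc-0.elixir-plug-poc.default.svc.cluster.local'}]}}
  end
]
```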
"""
use GenServer
use Cluster.Strategy
import Cluster.Logger
alias Cluster.Strategy.State
@default_polling_interval 5_000
@impl true
def start_link(args), do: GenServer.start_link(__MODULE__, args)
@impl true
def init([%State{meta: nil} = state]) do
init([%State{state | :meta => MapSet.new()}])
end
def init([%State{} = state]) do
{:ok, load(state), 0}
end
@impl true
def handle_info(:timeout, state) do
handle_info(:load, state)
end
def handle_info(:load, state) do
{:noreply, load(state)}
end
def handle_info(_, state) do
{:noreply, state}
end
defp load(%State{topology: topology, meta: meta} = state) do
new_nodelist = MapSet.new(get_nodes(state))
added = MapSet.difference(new_nodelist, meta)
removed = MapSet.difference(meta, new_nodelist)
new_nodelist =
case Cluster.Strategy.disconnect_nodes(
topology,
state.disconnect,
state.list_nodes,
MapSet.to_list(removed)
) do
:ok ->
new_nodelist
{:error, bad_nodes} ->
# Add back the nodes which should have been removed, but which couldn't be for some reason
Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
MapSet.put(acc, n)
end)
end
new_nodelist =
case Cluster.Strategy.connect_nodes(
topology,
state.connect,
state.list_nodes,
MapSet.to_list(added)
) do
:ok ->
new_nodelist
{:error, bad_nodes} ->
# Remove the nodes which should have been added, but couldn't be for some reason
Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
MapSet.delete(acc, n)
end)
end
Process.send_after(
self(),
:load,
polling_interval(state)
)
%State{state | :meta => new_nodelist}
end
@spec get_nodes(State.t()) :: [atom()]
defp get_nodes(%State{topology: topology, config: config}) do
app_name = Keyword.fetch!(config, :application_name)
service = Keyword.fetch!(config, :service)
namespace = Keyword.fetch!(config, :namespace)
service_k8s_path = "#{service}.#{namespace}.svc.cluster.local."
resolver = Keyword.get(config, :resolver, &:inet_res.getbyname(&1, :srv))
cond do
app_name != nil and service != nil ->
headless_service = to_charlist(service_k8s_path)
case resolver.(headless_service) do
{:ok, {:hostent, _, _, :srv, _count, addresses}} ->
parse_response(addresses, app_name)
{:error, reason} ->
error(
topology,
"#{inspect(headless_service)} : lookup against #{service} failed: #{inspect(reason)}"
)
[]
end
app_name == nil ->
warn(
topology,
"kubernetes.DNS strategy is selected, but :application_name is not configured!"
)
[]
service == nil ->
warn(topology, "kubernetes strategy is selected, but :service is not configured!")
[]
:else ->
warn(topology, "kubernetes strategy is selected, but is not configured!")
[]
end
end
defp polling_interval(%State{config: config}) do
Keyword.get(config, :polling_interval, @default_polling_interval)
end
defp parse_response(addresses, app_name) do
addresses
|> Enum.map(&:erlang.list_to_binary(elem(&1, 3)))
|> Enum.map(&"#{app_name}@#{&1}")
|> Enum.map(&String.to_atom(&1))
end
end | lib/strategy/kubernetes_dns_srv.ex | 0.851968 | 0.731754 | kubernetes_dns_srv.ex | starcoder |
defmodule Radixir.Core.API do
@moduledoc false
# @moduledoc """
# Submits requests to Core API.
# """
alias Radixir.Util
@type body :: map
@type options :: keyword
@type error_message :: String.t()
@doc """
Submits request to `/network/configuration`.
## Parameters
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/network/configuration](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/network/paths/~1network~1configuration/post)
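## Example

A minimal sketch; the url below is a placeholder for your node's Core API
endpoint:

    # authenticate via exported credentials
    Radixir.Core.API.get_network_configuration(auth_index: 0)

    # or pass credentials (and a one-off url) explicitly
    Radixir.Core.API.get_network_configuration(
      username: "admin",
      password: "<PASSWORD>",
      url: "https://core.example.com"
    )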
"""
@spec get_network_configuration(options) :: {:ok, map} | {:error, map | error_message}
def get_network_configuration(options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/network/configuration", %{}, options)
end
end
@doc """
Submits request to `/network/status`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>, <PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/network/status](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/network/paths/~1network~1status/post)
"""
@spec get_network_status(body, options) :: {:ok, map} | {:error, map | error_message}
def get_network_status(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/network/status", body, options)
end
end
@doc """
Submits request to `/entity`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD> h39! LW, monitor K<PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `<PASSWORD>!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/entity](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/entity/paths/~1entity/post)
"""
@spec get_entity_information(body, options) :: {:ok, map} | {:error, map | error_message}
def get_entity_information(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/entity", body, options)
end
end
@doc """
Submits request to `/mempool`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD> h39! LW, monitor <PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `<PASSWORD>!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/mempool](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/mempool/paths/~1mempool/post)
"""
@spec get_mempool_transactions(body, options) :: {:ok, map} | {:error, map | error_message}
def get_mempool_transactions(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/mempool", body, options)
end
end
@doc """
Submits request to `/mempool/transaction`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>! LW, monitor Kat darrel <PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/mempool/transaction](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/mempool/paths/~1mempool~1transaction/post)
"""
@spec get_mempool_transaction(body, options) :: {:ok, map} | {:error, map | error_message}
def get_mempool_transaction(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/mempool/transaction", body, options)
end
end
@doc """
Submits request to `/transactions`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD> love h39! LW, monitor Kat dar<PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `<PASSWORD>!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/transactions](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/transactions/paths/~1transactions/post)
"""
@spec get_committed_transactions(body, options) :: {:ok, map} | {:error, map | error_message}
def get_committed_transactions(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/transactions", body, options)
end
end
@doc """
Submits request to `/construction/derive`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>! LW, monitor Kat <PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `<PASSWORD>!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/construction/derive](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/construction/paths/~1construction~1derive/post)
"""
@spec derive_entity_identifier(body, options) :: {:ok, map} | {:error, map | error_message}
def derive_entity_identifier(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/construction/derive", body, options)
end
end
@doc """
Submits request to `/construction/build`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='<PASSWORD>!, <PASSWORD>, monitor Kat <PASSWORD>!'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/construction/build](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/construction/paths/~1construction~1build/post)
"""
@spec build_transaction(body, options) :: {:ok, map} | {:error, map | error_message}
def build_transaction(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/construction/build", body, options)
end
end
@doc """
Submits request to `/construction/parse`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='funny cats very Jack 21!, superadmin password, metrics password'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/construction/parse](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/construction/paths/~1construction~1parse/post)
"""
@spec parse_transaction(body, options) :: {:ok, map} | {:error, map | error_message}
def parse_transaction(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/construction/parse", body, options)
end
end
@doc """
Submits request to `/construction/finalize`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='funny cats very Jack 21!, superadmin password, metrics password'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/construction/finalize](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/construction/paths/~1construction~1finalize/post)
"""
@spec finalize_transaction(body, options) :: {:ok, map} | {:error, map | error_message}
def finalize_transaction(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/construction/finalize", body, options)
end
end
@doc """
Submits request to `/construction/hash`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='funny cats very Jack 21!, superadmin password, metrics password'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/construction/hash](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/construction/paths/~1construction~1hash/post)
"""
@spec get_transaction_hash(body, options) :: {:ok, map} | {:error, map | error_message}
def get_transaction_hash(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/construction/hash", body, options)
end
end
@doc """
Submits request to `/construction/submit`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='funny cats very Jack 21!, superadmin password, metrics password'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/construction/submit](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/construction/paths/~1construction~1submit/post)
"""
@spec submit_transaction(body, options) :: {:ok, map} | {:error, map | error_message}
def submit_transaction(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/construction/submit", body, options)
end
end
@doc """
Submits request to `/key/list`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='funny cats very Jack 21!, superadmin password, metrics password'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/key/list](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/key/paths/~1key~1list/post)
"""
@spec get_public_keys(body, options) :: {:ok, map} | {:error, map | error_message}
def get_public_keys(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/key/list", body, options)
end
end
@doc """
Submits request to `/key/sign`.
## Parameters
- `body`: Request body.
- `options`: Keyword list that contains
- `url` (optional, string): If url is not in options then the url set in the configs will be used.
- any other options one may want to pass along to the http layer - for example `headers`
- `auth_index` (optional, integer): Index of the username + password combo to be used for endpoint authentication.
- `username`: (optional, string): Username to be used for endpoint authentication.
- `password`: (optional, string): Password to be used for endpoint authentication.
## Note
- Either `username` and `password` or `auth_index` must be provided.
- If all three are provided `auth_index` is used.
## Example
If the following usernames and passwords are exported:
```
export USERNAMES='admin, superadmin, metrics'
export PASSWORDS='funny cats very Jack 21!, superadmin password, metrics password'
```
then passing `auth_index: 0` would lead to `admin` being used as the `username` and `funny cats very Jack 21!` being used as the `password` for endpoint authentication.
## Core API Documentation
- [/key/sign](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/radixdlt/radixdlt/1.1.0/radixdlt-core/radixdlt/src/main/java/com/radixdlt/api/core/api.yaml#tag/key/paths/~1key~1sign/post)
"""
@spec sign_transaction(body, options) :: {:ok, map} | {:error, map | error_message}
def sign_transaction(body, options \\ []) do
with {:ok, username, password, options} <- Util.get_auth_from_options(options),
{:ok, url, options} <- Util.get_url_from_options(options, :core) do
auth = [auth: {username, password}]
options = Keyword.merge(auth, options)
impl().post(url, "/key/sign", body, options)
end
end
defp impl, do: Application.get_env(:radixir, :http, Radixir.HTTP)
end
# lib/radixir/core/api.ex
defmodule Mix.Tasks.Do do
use Mix.Task
@shortdoc "Executes the tasks separated by plus"
@moduledoc """
Executes the tasks separated by `+`:
mix do compile --list + deps
The plus should be followed by at least one space before and after.
## Examples
The example below prints the available compilers and
then the list of dependencies.
mix do compile --list + deps
Note that the majority of Mix tasks are only executed once
per invocation. So for example, the following command will
only compile once:
mix do compile + some_other_command + compile
When `compile` is executed again, Mix will notice the task
has already run, and skip it.
Inside umbrella projects, you can limit recursive tasks
(the ones that run inside every app) by selecting the
desired application via the `--app` flag after `do` and
before the first task:
mix do --app app1 --app app2 compile --list + deps
Elixir versions prior to v1.14 used the comma exclusively
to separate commands:
mix do compile --list, deps
Since then, the `+` operator has been introduced as a
separator for better support on Windows terminals.
## Command line options
* `--app` - limit recursive tasks to the given apps.
This option may be given multiple times and must come
before any of the tasks.
"""
# TODO: Deprecate using comma on Elixir v1.18
@impl true
def run(args) do
Mix.Task.reenable("do")
{apps, args} = extract_apps_from_args(args)
show_forgotten_apps_warning(apps)
Enum.each(gather_commands(args), fn [task | args] ->
if apps == [] do
Mix.Task.run(task, args)
else
Mix.Task.run_in_apps(task, apps, args)
end
end)
end
defp show_forgotten_apps_warning([]), do: nil
defp show_forgotten_apps_warning(apps) do
config = Mix.Project.config()
if Mix.Project.umbrella?(config) do
known_apps = Mix.Project.apps_paths(config)
for app <- apps, not Map.has_key?(known_apps, app) do
Mix.shell().info([:yellow, "warning: could not find application #{inspect(app)}"])
end
end
end
defp extract_apps_from_args(args) do
{opts, args} = OptionParser.parse_head!(args, strict: [app: :keep])
apps =
opts
|> Keyword.get_values(:app)
|> Enum.map(&String.to_atom/1)
{apps, args}
end
@doc false
def gather_commands(args) do
gather_commands(args, [], [])
end
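  # For example (illustrative), both separators yield the same grouping:
  #
  #   gather_commands(["compile", "--list", "+", "deps"])
  #   #=> [["compile", "--list"], ["deps"]]
  #   gather_commands(["compile,", "deps"])
  #   #=> [["compile"], ["deps"]]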
defp gather_commands([head | rest], current, acc)
when binary_part(head, byte_size(head), -1) == "," do
current =
case binary_part(head, 0, byte_size(head) - 1) do
"" -> Enum.reverse(current)
part -> Enum.reverse([part | current])
end
gather_commands(rest, [], [current | acc])
end
defp gather_commands(["+" | rest], current, acc) do
gather_commands(rest, [], [Enum.reverse(current) | acc])
end
defp gather_commands([head | rest], current, acc) do
gather_commands(rest, [head | current], acc)
end
defp gather_commands([], current, acc) do
Enum.reverse([Enum.reverse(current) | acc])
end
end
# lib/mix/lib/mix/tasks/do.ex
defmodule Elixium.Validator do
alias Elixium.Block
alias Elixium.Utilities
alias Elixium.KeyPair
alias Elixium.Store.Ledger
alias Elixium.BlockEncoder
alias Elixium.Store.Oracle
alias Elixium.Transaction
@moduledoc """
Responsible for applying the consensus rules to all blocks and transactions
"""
@doc """
A block is considered valid if the index is greater than the index of the previous block,
the previous_hash is equal to the hash of the previous block, and the hash of the block,
when recalculated, matches the listed block hash
"""
@spec is_block_valid?(Block, number) :: :ok | {:error, any}
def is_block_valid?(block, difficulty, last_block \\ Ledger.last_block(), pool_check \\ &Oracle.inquire(:"Elixir.Elixium.Store.UtxoOracle", {:in_pool?, [&1]})) do
if :binary.decode_unsigned(block.index) == 0 do
with :ok <- valid_coinbase?(block),
:ok <- valid_transactions?(block, pool_check),
:ok <- valid_merkle_root?(block.merkle_root, block.transactions),
:ok <- valid_hash?(block, difficulty),
:ok <- valid_timestamp?(block),
:ok <- valid_block_size?(block) do
:ok
else
err -> err
end
else
with :ok <- valid_index(block.index, last_block.index),
:ok <- valid_prev_hash?(block.previous_hash, last_block.hash),
:ok <- valid_coinbase?(block),
:ok <- valid_transactions?(block, pool_check),
:ok <- valid_merkle_root?(block.merkle_root, block.transactions),
:ok <- valid_hash?(block, difficulty),
:ok <- valid_timestamp?(block),
:ok <- valid_block_size?(block) do
:ok
else
err -> err
end
end
end
@spec valid_merkle_root?(binary, list) :: :ok | {:error, :invalid_merkle_root}
defp valid_merkle_root?(merkle_root, transactions) do
calculated_root =
transactions
|> Enum.map(&:erlang.term_to_binary/1)
|> Utilities.calculate_merkle_root()
if calculated_root == merkle_root, do: :ok, else: {:error, :invalid_merkle_root}
end
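  # Because the merkle root is recomputed from the serialized transactions,
  # tampering with any transaction in the block changes the calculated root
  # and fails this check.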
@spec valid_index(number, number) :: :ok | {:error, {:invalid_index, number, number}}
defp valid_index(index, prev_index) when index > prev_index, do: :ok
defp valid_index(idx, prev), do: {:error, {:invalid_index, prev, idx}}
@spec valid_prev_hash?(String.t(), String.t()) :: :ok | {:error, {:wrong_hash, {:doesnt_match_last, String.t(), String.t()}}}
defp valid_prev_hash?(prev_hash, last_block_hash) when prev_hash == last_block_hash, do: :ok
defp valid_prev_hash?(phash, lbhash), do: {:error, {:wrong_hash, {:doesnt_match_last, phash, lbhash}}}
@spec valid_hash?(Block, number) :: :ok | {:error, {:wrong_hash, {:too_high, String.t(), number}}}
defp valid_hash?(b, difficulty) do
with :ok <- compare_hash(b, b.hash),
:ok <- beat_target?(b.hash, difficulty) do
:ok
else
err -> err
end
end
defp beat_target?(hash, difficulty) do
if Block.hash_beat_target?(%{hash: hash, difficulty: difficulty}) do
:ok
else
{:error, {:wrong_hash, {:too_high, hash, difficulty}}}
end
end
@spec compare_hash(Block, String.t()) :: :ok | {:error, {:wrong_hash, {:doesnt_match_provided, String.t(), String.t()}}}
defp compare_hash(block, hash) do
computed = Block.calculate_block_hash(block)
if computed == hash do
:ok
else
{:error, {:wrong_hash, {:doesnt_match_provided, computed, hash}}}
end
end
@spec valid_coinbase?(Block) :: :ok | {:error, :no_coinbase} | {:error, :too_many_coinbase}
def valid_coinbase?(%{transactions: transactions, index: block_index}) do
coinbase = hd(transactions)
with :ok <- coinbase_exist?(coinbase),
:ok <- is_coinbase?(coinbase),
:ok <- appropriate_coinbase_output?(transactions, block_index),
:ok <- one_coinbase?(transactions) do
:ok
else
err -> err
end
end
def one_coinbase?(transactions) do
one =
transactions
|> Enum.filter(& &1.txtype == "COINBASE")
|> length()
|> Kernel.==(1)
if one, do: :ok, else: {:error, :too_many_coinbase}
end
def coinbase_exist?(nil), do: {:error, :no_coinbase}
def coinbase_exist?(_coinbase), do: :ok
  @doc """
  Checks if a transaction is valid. A transaction is considered valid if
  1) all of its inputs are currently in our UTXO pool and 2) all addresses
  listed in the inputs have a corresponding signature in the sig set of the
  transaction. pool_check is a function which tests whether or not a
  given input is in a pool (this is mostly used in the case of a fork), and
  this function must return a boolean.
  Coinbase transactions are validated separately; if a coinbase transaction
  gets here, the function always returns :ok.
  """
  @spec valid_transaction?(Transaction, function) :: :ok | {:error, any}
  def valid_transaction?(transaction, pool_check \\ &Oracle.inquire(:"Elixir.Elixium.Store.UtxoOracle", {:in_pool?, [&1]}))
  def valid_transaction?(%{txtype: "COINBASE"}, _pool_check), do: :ok
  def valid_transaction?(transaction, pool_check) do
with :ok <- correct_tx_id?(transaction),
:ok <- passes_pool_check?(transaction, pool_check),
:ok <- tx_addr_match?(transaction),
:ok <- tx_sigs_valid?(transaction),
:ok <- utxo_amount_integer?(transaction),
:ok <- outputs_dont_exceed_inputs?(transaction) do
:ok
else
err -> err
end
end
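  # Illustrative: a permissive pool check (e.g. for tests) that treats every
  # input as present in the UTXO pool:
  #
  #   valid_transaction?(transaction, fn _input -> true end)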
@spec correct_tx_id?(Transaction) :: :ok | {:error, {:invalid_tx_id, String.t(), String.t()}}
def correct_tx_id?(transaction) do
expected_id = Transaction.calculate_hash(transaction)
if expected_id == transaction.id do
:ok
else
{:error, {:invalid_tx_id, expected_id, transaction.id}}
end
end
@spec passes_pool_check?(Transaction, function) :: :ok | {:error, :failed_pool_check}
def passes_pool_check?(%{inputs: inputs}, pool_check) do
if Enum.all?(inputs, & pool_check.(&1)) do
:ok
else
{:error, :failed_pool_check}
end
end
@spec tx_addr_match?(Transaction) :: :ok | {:error, :sig_set_mismatch}
defp tx_addr_match?(transaction) do
signed_addresses = Enum.map(transaction.sigs, fn {addr, _sig} -> addr end)
# Check that all addresses in the inputs are also part of the signature set
all? =
transaction.inputs
|> Enum.map(& &1.addr)
|> Enum.uniq()
|> Enum.all?(& Enum.member?(signed_addresses, &1))
if all?, do: :ok, else: {:error, :sig_set_mismatch}
end
@spec tx_sigs_valid?(Transaction) :: :ok | {:error, :invalid_tx_sig}
defp tx_sigs_valid?(transaction) do
all? =
Enum.all?(transaction.sigs, fn {addr, sig} ->
pub = KeyPair.address_to_pubkey(addr)
transaction_digest = Transaction.signing_digest(transaction)
KeyPair.verify_signature(pub, sig, transaction_digest)
end)
if all?, do: :ok, else: {:error, :invalid_tx_sig}
end
@spec utxo_amount_integer?(Transaction) :: :ok | {:error, :utxo_amount_not_integer}
def utxo_amount_integer?(transaction) do
if Enum.all?(transaction.inputs ++ transaction.outputs, & is_integer(&1.amount)) do
:ok
else
{:error, :utxo_amount_not_integer}
end
end
@spec outputs_dont_exceed_inputs?(Transaction) :: :ok | {:error, {:outputs_exceed_inputs, integer, integer}}
defp outputs_dont_exceed_inputs?(transaction) do
input_total = Transaction.sum_inputs(transaction.inputs)
output_total = Transaction.sum_inputs(transaction.outputs)
if output_total <= input_total do
:ok
else
{:error, {:outputs_exceed_inputs, output_total, input_total}}
end
end
@spec valid_transactions?(Block, function) :: :ok | {:error, {:invalid_transactions, list}}
def valid_transactions?(%{transactions: transactions}, pool_check \\ &Oracle.inquire(:"Elixir.Elixium.Store.UtxoOracle", {:in_pool?, [&1]})) do
results = Enum.map(transactions, & valid_transaction?(&1, pool_check))
if Enum.all?(results, & &1 == :ok), do: :ok, else: {:error, {:invalid_transactions, Enum.filter(results, & &1 != :ok)}}
end
@spec is_coinbase?(Transaction) :: :ok | {:error, {:not_coinbase, String.t()}}
defp is_coinbase?(%{txtype: "COINBASE"}), do: :ok
defp is_coinbase?(tx), do: {:error, {:not_coinbase, tx.txtype}}
@spec appropriate_coinbase_output?(list, number) :: :ok | {:error, {:invalid_coinbase, integer, integer, integer}}
defp appropriate_coinbase_output?([coinbase | transactions], block_index) do
total_fees = Block.total_block_fees(transactions)
reward =
block_index
|> :binary.decode_unsigned()
|> Block.calculate_block_reward()
amount = hd(coinbase.outputs).amount
if total_fees + reward == amount do
:ok
else
{:error, {:invalid_coinbase, total_fees, reward, amount}}
end
end
@spec valid_timestamp?(Block) :: :ok | {:error, :timestamp_too_high}
defp valid_timestamp?(%{timestamp: timestamp}) do
ftl = Application.get_env(:elixium_core, :future_time_limit)
current_time =
DateTime.utc_now()
|> DateTime.to_unix()
if timestamp < current_time + ftl, do: :ok, else: {:error, :timestamp_too_high}
end
@spec valid_block_size?(Block) :: :ok | {:error, :block_too_large}
defp valid_block_size?(block) do
block_size_limit = Application.get_env(:elixium_core, :block_size_limit)
under_size_limit =
block
|> BlockEncoder.encode()
|> byte_size()
|> Kernel.<=(block_size_limit)
if under_size_limit, do: :ok, else: {:error, :block_too_large}
end
end
# lib/validator.ex
defmodule Bitcoin.Protocol.Messages.Block do
@moduledoc """
The block message is sent in response to a getdata message which requests transaction information from a block hash.
The SHA256 hash that identifies each block (and which must have a run of 0 bits) is calculated from the first 6
fields of this structure (version, prev_block, merkle_root, timestamp, bits, nonce, and standard SHA256 padding,
making two 64-byte chunks in all) and not from the complete block. To calculate the hash, only two chunks need to
be processed by the SHA256 algorithm. Since the nonce field is in the second chunk, the first chunk stays constant
during mining and therefore only the second chunk needs to be processed. However, a Bitcoin hash is the hash of the
hash, so two SHA256 rounds are needed for each mining iteration. See Block hashing algorithm
<https://en.bitcoin.it/wiki/Block_hashing_algorithm> for details and an example.
https://en.bitcoin.it/wiki/Protocol_documentation#block
"""
alias Bitcoin.Protocol.Messages.Tx
import Bitcoin.Protocol
# Block version information, based upon the software version creating this block
defstruct version: 0,
# char[32], The hash value of the previous block this particular block references
previous_block:
<<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0>>,
# char[32], The reference to a Merkle tree collection which is a hash of all transactions related to this block
merkle_root:
<<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0>>,
# uint32_t, A Unix timestamp recording when this block was created (Currently limited to dates before the year 2106!)
timestamp: 0,
# uint32_t, The calculated difficulty target being used for this block
bits: 0,
# uint32_t, The nonce used to generate this block… to allow variations of the header and compute different hashes
nonce: 0,
# count - Bitcoin.Protocol.Types.Integer, number of transaction entries in this block, [Transaction]
transactions: []
@type t :: %__MODULE__{
version: integer,
previous_block: Bitcoin.Block.t_hash(),
merkle_root: Bitcoin.t_hash(),
timestamp: non_neg_integer,
bits: non_neg_integer,
nonce: non_neg_integer,
transactions: list(Tx.t())
}
@spec parse(binary) :: t
def parse(data) do
<<version::little-integer-size(32), previous_block::bytes-size(32),
merkle_root::bytes-size(32), timestamp::unsigned-little-integer-size(32),
bits::unsigned-little-integer-size(32), nonce::unsigned-little-integer-size(32),
payload::binary>> = data
{transactions, _} = payload |> collect_items(Tx)
%__MODULE__{
version: version,
previous_block: previous_block,
merkle_root: merkle_root,
timestamp: timestamp,
bits: bits,
nonce: nonce,
transactions: transactions
}
end
@spec serialize(t) :: binary
def serialize(%__MODULE__{} = s) do
(s |> serialize_header) <>
(s.transactions |> serialize_items)
end
# Serialization of header fields is separated so that we can compute the block hash
# Note that these differ from Types.BlockHeader by transaction_count field
@spec serialize_header(t) :: binary
def serialize_header(%__MODULE__{} = s) do
<<
s.version::little-integer-size(32),
s.previous_block::bytes-size(32),
s.merkle_root::bytes-size(32),
s.timestamp::unsigned-little-integer-size(32),
s.bits::unsigned-little-integer-size(32),
s.nonce::unsigned-little-integer-size(32)
>>
end
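  # The Bitcoin block hash is a double SHA256 over the 80-byte serialized
  # header. A minimal sketch (assuming Erlang's :crypto; not part of this
  # module's API):
  #
  #   block_hash = :crypto.hash(:sha256, :crypto.hash(:sha256, serialize_header(block)))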
# Transform Block struct to Types.BlockHeader struct
@spec header(t) :: Bitcoin.Protocol.Types.BlockHeader.t()
def header(%__MODULE__{} = block) do
%Bitcoin.Protocol.Types.BlockHeader{}
|> Map.merge(
block
|> Map.from_struct()
|> Map.put(:transaction_count, block.transactions |> length)
|> Map.delete(:transactions)
)
end
end
# lib/bitcoin/protocol/messages/block.ex
defmodule ContentSecurityPolicy.Plug.AddSourceValue do
@moduledoc """
Plug which adds a source value to the given directive.
This plug must be run after the `ContentSecurityPolicy.Setup` plug, or it
will raise an exception.
## Example Usage
In a controller or router:
plug ContentSecurityPolicy.Plug.Setup
plug ContentSecurityPolicy.Plug.AddSourceValue,
script_src: "https://google.com"
When the response is sent to the browser, the `"content-security-policy"`
response header will contain `"script-src https://google.com"` directive.
Multiple directives and source values can be provided in the same call.
plug ContentSecurityPolicy.Plug.AddSourceValue,
script_src: "'self'",
script_src: "https://google.com"
When the response is sent to the browser, the `"content-security-policy"`
response header will contain `"script-src 'self' https://google.com"`
directive.
The `ContentSecurityPolicy.Plug.AddSourceValue` plug is additive. It will
never replace or remove old source values associated with a directive.
"""
import Plug.Conn
alias ContentSecurityPolicy.Directive
alias ContentSecurityPolicy.Policy
def init([]), do: raise_no_arguments_error()
def init(opts) do
Enum.each(opts, fn {directive, _source_value} ->
Directive.validate_directive!(directive)
end)
opts
end
def call(_conn, []), do: raise_no_arguments_error()
def call(conn, opts) do
existing_policy = get_policy!(conn)
updated_policy =
Enum.reduce(opts, existing_policy, fn {directive, source_value}, policy ->
ContentSecurityPolicy.add_source_value(policy, directive, source_value)
end)
put_private(conn, :content_security_policy, updated_policy)
end
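  # Note: this plug only accumulates source values on the policy stored in
  # conn.private.content_security_policy; serializing the policy into the
  # "content-security-policy" response header is handled elsewhere in the
  # library (outside this module).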
defp get_policy!(%{private: %{content_security_policy: %Policy{} = policy}}) do
policy
end
defp get_policy!(_) do
raise """
Attempted to add a source value to the content security policy, but the
content security policy was not initialized.
Please make sure that the `ContentSecurityPolicy.Plug.Setup` plug is run
before the `ContentSecurityPolicy.Plug.AddSourceValue` plug.
"""
end
defp raise_no_arguments_error do
raise ArgumentError, """
No directive and source value supplied to the
`ContentSecurityPolicy.Plug.AddSourceValue` plug.
"""
end
end
# lib/content_security_policy/plug/add_source_value.ex
defmodule Toastr.Notifier do
@moduledoc """
Helper to make useful notifications to be used in LiveView.
It implements the to_flash/2 function to put flashes on the socket.
## Examples
```elixir
defmodule MyWeb.Notifier do
use Toastr.Notifier, MyWeb.Gettext
defp model_name(%My.Accounts.User{name: name}), do: {gettext("User"), name}
defp model_name(%My.Customers.Customer{name: name}), do: {gettext("Customer"), name}
defp model_name(_), do: :bad_model
end
```
You can now use this Notfier in `Toastr.Phoenix.Show` and `Toastr.Phoenix.Index`.
"""
@doc """
When used, implements Helpers for notifications.
"""
defmacro __using__(gettext) do
quote do
require Logger
alias Phoenix.LiveView
defp gettext(msg, opts \\ []), do: Gettext.gettext(unquote(gettext), msg, opts)
def to_flash(%LiveView.Socket{} = socket, {:error, data, message}) do
{model, name} = model_name(data)
msg = "error saving %{model} %{name}: %{message}"
params = [model: model, name: name, message: gettext(message)]
socket
|> LiveView.put_flash(:error, gettext(msg, params))
end
def to_flash(%LiveView.Socket{} = socket, {action, data}) do
case model_name(data) do
{model, name} ->
msg = "%{model} %{name} %{action} successfully"
params = [model: model, name: name, action: gettext(Atom.to_string(action))]
socket
|> LiveView.put_flash(:info, gettext(msg, params))
:bad_model ->
Logger.error("Got unhandled Notifier data: #{inspect(data)}")
socket
end
end
def handle_info({action, _data} = msg, socket) when is_atom(action),
do: {:noreply, to_flash(socket, msg)}
end
end
use Phoenix.HTML
@doc """
Renders flash errors as drop in notifications.
"""
def flash_errors(conn) do
conn.private[:phoenix_flash]
|> flash_live_errors()
end
  @doc """
  Renders live flash errors as drop in notifications.
  Renders nothing when no flashes are given.
  """
  def flash_live_errors(nil), do: ~E""
  def flash_live_errors(flashes) do
~E"""
<%= for {category, message} <- flashes do %>
<span class="live-flash" data-level="<%= category %>"><%= message %></span>
<% end %>
"""
end
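  # Illustrative use in a layout template:
  #
  #   <%= Toastr.Notifier.flash_errors(@conn) %>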
@doc """
Helper function to redirect to `to` when the payload's `"id"` matches the given id; otherwise the socket is returned unchanged.
"""
def redirect_if_id(socket, needed_id, %{"id" => object_id}, to) do
if object_id != needed_id do
socket
else
Phoenix.LiveView.push_redirect(socket, to)
end
end
  @doc """
  Helper function to update object if its id matches the given id for the given name.
  When the payload has no `"id"` key, the socket is returned unchanged.
  """
  def update_if_id(socket, name, needed_id, %{"id" => object_id} = object) do
    if object_id != needed_id do
      socket
    else
      Phoenix.LiveView.update(socket, name, fn _ -> object end)
    end
  end
  def update_if_id(socket, _, _, _), do: socket
end
# lib/toastr/notifier.ex
if Code.ensure_loaded?(Ecto) do
defmodule PromEx.Plugins.Ecto do
@moduledoc """
This plugin captures metrics emitted by Ecto. Be sure that your PromEx module is listed before your Repo module
in your supervision tree so that the Ecto init events are not missed. If you miss those events the dashboard
variable dropdowns for the repo value will be broken.
This plugin supports the following options:
- `otp_app`: This is a REQUIRED option and is the name of your application in snake case (e.g. :my_cool_app).
- `repos`: This is an OPTIONAL option and is a list of the full module names of your Ecto Repos (e.g. [MyApp.Repo]).
If you do not provide this value, PromEx will attempt to resolve your Repo modules via the
`:ecto_repos` configuration on your OTP app.
This plugin exposes the following metric groups:
- `:ecto_init_event_metrics`
- `:ecto_query_event_metrics`
To use plugin in your application, add the following to your PromEx module `plugins/0` function:
```
def plugins do
[
...
{PromEx.Plugins.Ecto, otp_app: :web_app, repos: [WebApp.Repo]}
]
end
```
"""
use PromEx.Plugin
require Logger
@init_event [:ecto, :repo, :init]
@query_event [:prom_ex, :plugin, :ecto, :query]
@impl true
def event_metrics(opts) do
otp_app = Keyword.fetch!(opts, :otp_app)
metric_prefix = PromEx.metric_prefix(otp_app, :ecto)
repo_event_prefixes =
opts
|> Keyword.get_lazy(:repos, fn ->
Application.get_env(otp_app, :ecto_repos)
end)
|> Enum.map(fn repo ->
otp_app
|> Application.get_env(repo)
|> Keyword.get_lazy(:telemetry_prefix, fn ->
telemetry_prefix(repo)
end)
end)
# Telemetry metrics will emit warnings if multiple handlers with the same names are defined.
# As a result, this plugin supports gathering metrics on multiple repos, but needs to proxy
# them as not to create multiple definitions of the same metrics. The final data point will
# have a label for the Repo associated with the event though so you'll be able to separate one
# repos measurements from another.
set_up_telemetry_proxy(repo_event_prefixes)
# Event metrics definitions
[
init_metrics(metric_prefix),
query_metrics(metric_prefix)
]
end
@doc false
def handle_proxy_query_event(_event_name, event_measurement, event_metadata, _config) do
:telemetry.execute(@query_event, event_measurement, event_metadata)
end
# Generate the default telemetry prefix
defp telemetry_prefix(repo) do
repo
|> Module.split()
|> Enum.map(&(&1 |> Macro.underscore() |> String.to_atom()))
end
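  # For example (illustrative): telemetry_prefix(MyApp.Repo) #=> [:my_app, :repo]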
defp init_metrics(metric_prefix) do
Event.build(
:ecto_init_event_metrics,
[
last_value(
metric_prefix ++ [:repo, :init, :status, :info],
event_name: @init_event,
description: "Information regarding the initialized repo.",
measurement: fn _measurements -> 1 end,
tags: [:repo, :database_name, :database_host],
tag_values: &ecto_init_tag_values/1
),
last_value(
metric_prefix ++ [:repo, :init, :pool, :size],
event_name: @init_event,
description: "The configured pool size value for the repo.",
measurement: fn _measurements, %{opts: opts} ->
Keyword.get(opts, :pool_size)
end,
tags: [:repo],
tag_values: &ecto_init_tag_values/1
),
last_value(
metric_prefix ++ [:repo, :init, :timeout, :duration],
event_name: @init_event,
description: "The configured timeout value for the repo.",
measurement: fn _measurements, %{opts: opts} ->
Keyword.get(opts, :timeout)
end,
tags: [:repo],
tag_values: &ecto_init_tag_values/1
)
]
)
end
defp query_metrics(metric_prefix) do
Event.build(
:ecto_query_event_metrics,
[
# Capture the db connection idle time
distribution(
metric_prefix ++ [:repo, :query, :idle, :time, :milliseconds],
event_name: @query_event,
measurement: :idle_time,
description: "The time the connection spent waiting before being checked out for the query.",
tags: [:repo],
tag_values: &ecto_query_tag_values/1,
reporter_options: [
buckets: [1, 10, 50, 100, 500, 1_000, 5_000, 10_000]
],
unit: {:native, :millisecond}
),
# Capture the db connection queue time
distribution(
metric_prefix ++ [:repo, :query, :queue, :time, :milliseconds],
event_name: @query_event,
measurement: :queue_time,
description: "The time spent waiting to check out a database connection.",
tags: [:repo],
tag_values: &ecto_query_tag_values/1,
reporter_options: [
buckets: [1, 10, 50, 100, 500, 1_000, 5_000, 10_000]
],
unit: {:native, :millisecond}
),
# Capture the db query decode time
distribution(
metric_prefix ++ [:repo, :query, :decode, :time, :milliseconds],
event_name: @query_event,
measurement: :decode_time,
description: "The time spent decoding the data received from the database.",
tags: [:repo],
tag_values: &ecto_query_tag_values/1,
reporter_options: [
buckets: [1, 10, 50, 100, 500, 1_000, 5_000, 10_000]
],
unit: {:native, :millisecond}
),
# Capture the query execution time
distribution(
metric_prefix ++ [:repo, :query, :execution, :time, :milliseconds],
event_name: @query_event,
measurement: :query_time,
description: "The time spent executing the query.",
tags: [:repo, :source, :command],
tag_values: &ecto_query_tag_values/1,
reporter_options: [
buckets: [1, 10, 50, 100, 500, 1_000, 5_000, 10_000]
],
unit: {:native, :millisecond}
),
# Capture the number of results returned
distribution(
metric_prefix ++ [:repo, :query, :results, :returned],
event_name: @query_event,
measurement: fn _measurement, %{result: result} ->
normalize_results_returned(result)
end,
description: "The time spent executing the query.",
tags: [:repo, :source, :command],
tag_values: &ecto_query_tag_values/1,
reporter_options: [
buckets: [1, 10, 50, 100, 250, 500, 1_000, 5_000]
],
drop: fn %{result: result} ->
normalize_results_returned(result) == :drop_data_point
end
)
]
)
end
defp set_up_telemetry_proxy(repo_event_prefixes) do
repo_event_prefixes
|> Enum.each(fn telemetry_prefix ->
query_event = telemetry_prefix ++ [:query]
:telemetry.attach(
[:prom_ex, :ecto, :proxy] ++ telemetry_prefix,
query_event,
&__MODULE__.handle_proxy_query_event/4,
%{}
)
end)
end
defp ecto_init_tag_values(%{repo: repo, opts: opts}) do
%{
repo: repo |> Atom.to_string() |> String.trim_leading("Elixir."),
database_name: Keyword.get(opts, :database),
database_host: Keyword.get(opts, :hostname)
}
end
defp ecto_query_tag_values(%{repo: repo, source: source, result: result}) do
%{
repo: repo |> Atom.to_string() |> String.trim_leading("Elixir."),
source: normalize_source(source),
command: normalize_command(result)
}
end
defp normalize_source(nil), do: "source_unavailable"
defp normalize_source(source) when is_binary(source), do: source
defp normalize_source(source) when is_atom(source), do: Atom.to_string(source)
defp normalize_source(_), do: "source_unavailable"
defp normalize_command({:ok, %_{command: command}}) when is_atom(command) do
Atom.to_string(command)
end
defp normalize_command(_) do
"unavailable"
end
defp normalize_results_returned({:ok, %_{num_rows: num_row}}) when is_integer(num_row) do
num_row
end
defp normalize_results_returned(_) do
:drop_data_point
end
end
else
defmodule PromEx.Plugins.Ecto do
@moduledoc false
use PromEx.Plugin
@impl true
def event_metrics(_opts) do
PromEx.Plugin.no_dep_raise(__MODULE__, "Ecto")
end
end
end
# lib/prom_ex/plugins/ecto.ex
defmodule FlowAssertions do
@moduledoc """
This is a library of assertions for Elixir's ExUnit. It emphasizes two things:
1. Making tests easier to scan by capturing frequently-used assertions in
functions that can be used in a pipeline.
This library will appeal to people who prefer this:
```elixir
VM.ServiceGap.accept_form(params, @institution)
|> ok_content
|> assert_valid
|> assert_changes(id: 1,
in_service_datestring: @iso_date_1,
out_of_service_datestring: @iso_date_2,
reason: "reason")
```
... to this:
```elixir
assert {:ok, changeset} = VM.ServiceGap.accept_form(params, @institution)
assert changeset.valid?
changes = changeset.changes
assert changes.id == 1
assert changes.in_service_datestring == @iso_date_1
assert changes.out_of_service_datestring == @iso_date_2
assert changes.reason == "reason"
```
The key point here is that all of the `assert_*` functions in this package
return their first argument to be used with later chained functions.
2. Error messages as helpful as those in the base ExUnit assertions:
<img src="https://raw.githubusercontent.com/marick/flow_assertions/main/pics/error2.png"/>
## Installation
Add `flow_assertions` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:flow_assertions, "~> 0.6", only: :test},
]
end
```
## Use
The easiest way is `use FlowAssertions`, which imports the most important modules, which are:
* `FlowAssertions.MapA`
* `FlowAssertions.MiscA`
* `FlowAssertions.EnumA`
* `FlowAssertions.StructA`
(in roughly that order).
If you prefer to `alias` rather than `import`, note that all the
assertion modules end in `A`. That way, there's no conflict between
the module with map assertions (`FlowAssertions.MapA` and the `Map`
module itself.
## Reading error output
`ExUnit` has very nice reporting for assertions where a left-hand side is compared to a right-hand side, as in:
```elixir
assert x == y
```
The error output shows the values of both `x` and `y`, using
color-coding to highlight differences.
`FlowAssertions` uses that mechanism when appropriate. However, it
does more complicated comparisons, so the words `left` and `right`
aren't strictly accurate. So, suppose you're reading errors from code
like this:
```elixir
calculation
|> assert_something(expected)
|> assert_something_else(expected)
```
In the output, `left` will refer to some value extracted from
`calculation` and `right` will refer to a value extracted from
`expected` (most likely `expected` itself).
## Defining your own assertions
*TBD*
## Related code
* [assertions](https://hexdocs.pm/assertions/Assertions.html) is another package of common assertions.
* [ecto_flow_assertions](https://hexdocs.pm/ecto_flow_assertions/FlowAssertions.Ecto.html) extends this library with Ecto-specific assertions.
* [phoenix_integration](https://hexdocs.pm/phoenix_integration/PhoenixIntegration.html) uses flow-style assertions for integration testing.
## Change log
[Here](./changelog.html).
"""
defmacro __using__(_) do
quote do
import FlowAssertions.EnumA
import FlowAssertions.MapA
import FlowAssertions.MiscA
import FlowAssertions.StructA
import FlowAssertions.Checkers
end
end
end
# lib/flow_assertions.ex
defmodule Model.Prediction do
@moduledoc """
The predicted `arrival_time` and `departure_time` to/from a stop (`stop_id`) at a given sequence (`stop_sequence`)
along a trip (`trip_id`) going a direction (`direction_id`) along a route (`route_id`).
See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-tripdescriptor)
See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-stoptimeupdate)
For the scheduled times, see `Model.Schedule.t`.
"""
use Recordable, [
:trip_id,
:stop_id,
:route_id,
:vehicle_id,
:direction_id,
:route_pattern_id,
:arrival_time,
:departure_time,
:stop_sequence,
:schedule_relationship,
:status,
trip_match?: false
]
@typedoc """
| Value | Description |
|----------------|-------------|
| `:added` | An extra trip that was added in addition to a running schedule, for example, to replace a broken vehicle or to respond to sudden passenger load. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor` `ScheduleRelationship` `ADDED`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship-1) |
| `:cancelled` | A trip that existed in the schedule but was removed. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor` `ScheduleRelationship` `CANCELED`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship-1) |
| `:no_data` | No data is given for this stop. It indicates that there is no realtime information available. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `ScheduleRelationship` `NO_DATA`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship) |
| `:skipped` | The stop was originally scheduled, but was skipped. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `ScheduleRelationship`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship) |
| `:unscheduled` | A trip that is running with no schedule associated to it. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor` `ScheduleRelationship` `UNSCHEDULED`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship-1) |
| `nil` | Stop was scheduled. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor` `ScheduleRelationship` `SCHEDULED`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship-1) |
See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor` `ScheduleRelationship`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship-1)
See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `ScheduleRelationship`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship)
"""
@type schedule_relationship :: :added | :cancelled | :no_data | :skipped | :unscheduled | nil
@typedoc """
* `:arrival_time` - When the vehicle is now predicted to arrive. `nil` if the first stop (`stop_id`) on the trip
(`trip_id`). See
[GTFS `Realtime` `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `arrival`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-stoptimeupdate).
* `:departure_time` - When the vehicle is now predicted to depart. `nil` if the last stop (`stop_id`) on the trip
(`trip_id`). See
[GTFS `Realtime` `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `departure`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-stoptimeupdate).
* `:direction_id` - Which direction along `route_id` the `trip_id` is going. See
[GTFS `trips.txt` `direction_id`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#tripstxt).
* `:route_id` - The route `trip_id` is on doing in `direction_id`. See
[GTFS `trips.txt` `route_id`](https://github.com/google/transit/blob/master/gtfs/spec/en/reference.md#tripstxt)
* `:schedule_relationship` - How the predicted stop relates to the `Model.Schedule.t` stops.
See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor` `ScheduleRelationship`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship-1).
See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `ScheduleRelationship`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#enum-schedulerelationship).
* `:status` - Description of change
* `:stop_id` - Stop whose arrival/departure is being predicted. See
[GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `stop_id`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-stoptimeupdate).
* `:stop_sequence` - The sequence the `stop_id` is arrived at during the `trip_id`. The stop sequence is
monotonically increasing along the trip, but the `stop_sequence` along the `trip_id` are not necessarily
consecutive. See
[GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `StopTimeUpdate` `stop_sequence`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-stoptimeupdate).
* `:trip_id` - The trip the `stop_id` is on. See [GTFS Realtime `FeedMessage` `FeedEntity` `TripUpdate` `TripDescriptor`](https://github.com/google/transit/blob/master/gtfs-realtime/spec/en/reference.md#message-tripdescriptor)
* `:trip_match?` - a boolean indicating whether the prediction is for a trip in the GTFS file
"""
@type t :: %__MODULE__{
arrival_time: DateTime.t() | nil,
departure_time: DateTime.t() | nil,
direction_id: Model.Direction.id(),
route_id: Model.Route.id(),
route_pattern_id: Model.RoutePattern.id(),
vehicle_id: Model.Vehicle.id() | nil,
schedule_relationship: schedule_relationship,
status: String.t() | nil,
stop_id: Model.Stop.id(),
stop_sequence: non_neg_integer | nil,
trip_id: Model.Trip.id(),
trip_match?: boolean
}
@spec trip_id(t) :: Model.Trip.id()
def trip_id(%__MODULE__{trip_id: trip_id}), do: trip_id
end
# apps/model/lib/model/prediction.ex
defmodule Plymio.Codi.Pattern.Proxy do
@moduledoc ~S"""
The *proxy* patterns manage the *vekil*.
See `Plymio.Codi` for an overview and documentation terms.
## Pattern: *proxy_fetch*
The *proxy_fetch* pattern fetches the *forom* of one or more *proxies* in the
*vekil*.
*proxy_fetch* maps directly to a `Plymio.Vekil.proxy_fetch/2` call on
the *vekil*; all of the *proxies* must exist else an error result will be
returned.
Valid keys in the *cpo* are:
| Key | Aliases |
| :--- | :--- |
| `:proxy_name` | *:proxy_names, :proxy, :proxies* |
## Examples
A simple case fetching one *proxy*:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy: :add_1,
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_1(x)) do\n x + 1\n end"]
If the *proxy* is not found, or there is no *vekil*, an error result will be returned.
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> }
...> {:error, error} = [
...> vekil: vekil_dict,
...> proxy: :add_11,
...> ] |> produce_codi
...> error |> Exception.message
"proxy invalid, got: :add_11"
iex> {:error, error} = [
...> proxy: :add_11,
...> ] |> produce_codi
...> error |> Exception.message
"vekil missing"
iex> vekil_dict = %{
...> # a map is not a valid form
...> add_1: %{a: 1},
...> }
...> {:error, error} = [
...> vekil: vekil_dict,
...> proxy: :add_1,
...> ] |> produce_codi
...> error |> Exception.message
"form invalid, got: %{a: 1}"
Multiple proxies can be given in a list:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> sqr_x: quote(do: def(sqr_x(x), do: x * x)),
...> sub_1: quote(do: def(sub_1(x), do: x - 1)),
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxies: [:add_1, :sqr_x, :sub_1]
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_1(x)) do\n x + 1\n end",
"def(sqr_x(x)) do\n x * x\n end",
"def(sub_1(x)) do\n x - 1\n end"]
A *proxy* can be a list of other proxies:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> sqr_x: quote(do: def(sqr_x(x), do: x * x)),
...> sub_1: quote(do: def(sub_1(x), do: x - 1)),
...> all: [:add_1, :sqr_x, :sub_1],
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy: :all
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_1(x)) do\n x + 1\n end",
"def(sqr_x(x)) do\n x * x\n end",
"def(sub_1(x)) do\n x - 1\n end"]
When the *proxy* is a list of proxies, infinite loops are caught:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> sqr_x: quote(do: def(sqr_x(x), do: x * x)),
...> sub_1: quote(do: def(sub_1(x), do: x - 1)),
...> all_loop: [:add_1, :sqr_x, :sub_1, :all_loop],
...> }
...> {:error, error} = [
...> vekil: vekil_dict,
...> proxy: :all_loop
...> ] |> produce_codi
...> error |> Exception.message
"proxy seen before, got: :all_loop"
It is more efficient to pre-create (ideally at compile time) the *vekil*:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> sqr_x: quote(do: def(sqr_x(x), do: x * x)),
...> sub_1: quote(do: def(sub_1(x), do: x - 1)),
...> all: [:add_1, :sqr_x, :sub_1],
...> }
...> {:ok, %Plymio.Vekil.Form{} = vekil} = [dict: vekil_dict] |>
...> Plymio.Vekil.Form.new
...> {:ok, {forms, _}} = [
...> vekil: vekil,
...> proxy: :all
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_1(x)) do\n x + 1\n end",
"def(sqr_x(x)) do\n x * x\n end",
"def(sub_1(x)) do\n x - 1\n end"]
In this example a `:forms_edit` is given renaming all the `x` vars to `a` vars, changing "1" to "42" and
renaming the `add_1` function to `incr_1`.
> renaming the vars in this example doesn't change the logic
iex> postwalk_fun = fn
...> 1 -> 42
...> x -> x
...> end
...> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> sqr_x: quote(do: def(sqr_x(x), do: x * x)),
...> sub_1: quote(do: def(sub_1(x), do: x - 1)),
...> all: [:add_1, :sqr_x, :sub_1],
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy: [proxy: :all, forms_edit: [
...> postwalk: postwalk_fun,
...> rename_vars: [x: :a],
...> rename_funs: [add_1: :incr_1]]]
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(incr_1(a)) do\n a + 42\n end",
"def(sqr_x(a)) do\n a * a\n end",
"def(sub_1(a)) do\n a - 42\n end"]
## Pattern: *proxy_put*
The *proxy_put* pattern puts one or more *proxies*, together with their *forom*, into the *vekil*.
*proxy_put* maps directly to a `Plymio.Vekil.proxy_put/2` call on
the *vekil*.
If the *vekil* does not exist, a new `Plymio.Vekil.Form` will be created.
Valid keys in the *cpo* are:
| Key | Aliases |
| :--- | :--- |
| `:proxy_args` | |
## Examples
A simple case puting one *proxy* and then fetching it:
iex> {:ok, {forms, _}} = [
...> proxy_put: [add_1: quote(do: def(add_1(x), do: x + 1))],
...> proxy_fetch: :add_1
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_1(x)) do\n x + 1\n end"]
In this example the same *proxy* (`:add_1`) is fetched twice but the
*proxy* is updated between the two fetches.
iex> {:ok, {forms, _}} = [
...> proxy_put: [add_1: quote(do: x = x + 1)],
...> proxy_fetch: :add_1,
...> proxy_put: [add_1: quote(do: x = x + 40)],
...> proxy_fetch: :add_1
...> ] |> produce_codi
...> forms |> harnais_helper_test_forms!(binding: [x: 1])
{42, ["x = x + 1", "x = x + 40"]}
Here an existing *proxy* (`:sqr_x`) is overridden. Note the
"composite" *proxy* `:all` is resolved as late as possible and finds the updated `:sqr_x`:
iex> vekil_dict = %{
...> add_1: quote(do: x = x + 1),
...> sqr_x: quote(do: x = x * x),
...> sub_1: quote(do: x = x - 1),
...> all: [:add_1, :sqr_x, :sub_1],
...> }
...> {:ok, %Plymio.Vekil.Form{} = vekil} = [dict: vekil_dict] |>
...> Plymio.Vekil.Form.new
...> {:ok, {forms, _}} = [
...> vekil: vekil,
...> # change the :sqr_x proxy to cube instead
...> proxy_put: [sqr_x: quote(do: x = x * x * x)],
...> proxy: :all
...> ] |> produce_codi
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{511, ["x = x + 1", "x = x * x * x", "x = x - 1"]}
## Pattern: *proxy_delete*
The *proxy_delete* pattern deletes one or more *proxies* from the
*vekil*. It can be used to change the behaviour of a subsequent `proxy_get` to use the `default`.
A missing *vekil* and / or unknown *proxies* are ignored without causing an error.
*proxy_delete* maps directly to a `Plymio.Vekil.proxy_delete/2` call on
the *vekil*.
Valid keys in the *cpo* are:
| Key | Aliases |
| :--- | :--- |
| `:proxy_name` | *:proxy_names, :proxy, :proxies* |
## Examples
A simple case of deleting a *proxy* and then fetching it:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> }
...> {:error, error} = [
...> vekil: vekil_dict,
...> proxy_delete: :add_1,
...> proxy_fetch: :add_1
...> ] |> produce_codi
...> error |> Exception.message
"proxy invalid, got: :add_1"
A missing *vekil* and / or unknown *proxies* are ignored without causing an error:
iex> {:ok, {[], codi}} = [
...> proxy_delete: :add_1,
...> proxy_delete: :does_not_matter
...> ] |> produce_codi
...> match?(%Plymio.Codi{}, codi)
true
## Pattern: *proxy_get*
The *proxy_get* pattern gets one or more *proxies* from the
*vekil* but with an optional `default` to be returned (as a *forom*) if the *proxy* is not found.
*proxy_get* maps directly to a `Plymio.Vekil.proxy_get/2` or `Plymio.Vekil.proxy_get/3` call on the *vekil*.
Valid keys in the *cpo* are:
| Key | Aliases |
| :--- | :--- |
| `:proxy_name` | *:proxy_names, :proxy, :proxies* |
| `:default` | |
## Examples
Here the *proxy* exists in the *vekil*:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy_get: :add_1,
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_1(x)) do\n x + 1\n end"]
If the *proxy* does not exist, and there is no `default`, no forms are returned:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy_get: :add_2,
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
[]
Here a default is provided. Note the `default` is automatically
normalised to a *forom* and then realised.
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy_get: [proxy: :add_2, default: quote(do: def(add_42(x), do: x + 42))]
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_42(x)) do\n x + 42\n end"]
The `default` can be another *proxy* in the *vekil*:
iex> vekil_dict = %{
...> add_1: quote(do: def(add_1(x), do: x + 1)),
...> add_42: quote(do: def(add_42(x), do: x + 42)),
...> }
...> {:ok, {forms, _}} = [
...> vekil: vekil_dict,
...> proxy_get: [proxy_name: :add_2, default: :add_42]
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_42(x)) do\n x + 42\n end"]
If there is no *vekil* and no `default`, no forms are returned:
iex> {:ok, {forms, _}} = [
...> proxy_get: :add_2,
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
[]
No *vekil* but a default works as expected:
iex> {:ok, {forms, _}} = [
...> proxy_get: [proxy: :add_2, default: quote(do: def(add_42(x), do: x + 42))]
...> ] |> produce_codi
...> forms |> harnais_helper_show_forms!
["def(add_42(x)) do\n x + 42\n end"]
As many defaults as *proxies* are returned:
iex> {:ok, {forms, _}} = [
...> proxy_get: [
...> proxy: [:x_sub_1, :a_mul_x, :not_a_proxy, :some_other_thing],
...> default: quote(do: x = x + 1)]
...> ] |> produce_codi
...> forms |> harnais_helper_test_forms!(binding: [x: 1])
{5, ["x = x + 1", "x = x + 1", "x = x + 1", "x = x + 1"]}
"""
alias Plymio.Codi, as: CODI
use Plymio.Fontais.Attribute
use Plymio.Codi.Attribute
import Plymio.Fontais.Guard,
only: [
is_value_set: 1,
is_value_unset_or_nil: 1
]
import Plymio.Codi.Error,
only: [
new_error_result: 1
]
import Plymio.Fontais.Option,
only: [
opts_create_aliases_dict: 1
]
import Plymio.Codi.Utility,
only: [
opts_resolve_proxy_names: 1
]
import Plymio.Fontais.Form,
only: [
forms_edit: 2
]
import Plymio.Codi.CPO,
only: [
cpo_normalise: 2,
cpo_done_with_edited_form: 2,
cpo_get_proxy_args: 1,
cpo_get_proxy_default: 1
]
@pattern_proxy_fetch_kvs_alias [
@plymio_codi_key_alias_pattern,
@plymio_codi_key_alias_status,
@plymio_codi_key_alias_form,
@plymio_codi_key_alias_state,
@plymio_codi_key_alias_proxy_name,
@plymio_codi_key_alias_forms_edit
]
@pattern_proxy_fetch_dict_alias @pattern_proxy_fetch_kvs_alias
|> opts_create_aliases_dict
@doc false
def cpo_pattern_proxy_fetch_normalise(cpo, dict \\ nil) do
cpo |> cpo_normalise(dict || @pattern_proxy_fetch_dict_alias)
end
@pattern_proxy_put_kvs_alias [
@plymio_codi_key_alias_pattern,
@plymio_codi_key_alias_status,
@plymio_codi_key_alias_proxy_args
]
@pattern_proxy_put_dict_alias @pattern_proxy_put_kvs_alias
|> opts_create_aliases_dict
@doc false
def cpo_pattern_proxy_put_normalise(cpo, dict \\ nil) do
cpo |> cpo_normalise(dict || @pattern_proxy_put_dict_alias)
end
@pattern_proxy_delete_kvs_alias [
@plymio_codi_key_alias_pattern,
@plymio_codi_key_alias_status,
@plymio_codi_key_alias_proxy_name
]
@pattern_proxy_delete_dict_alias @pattern_proxy_delete_kvs_alias
|> opts_create_aliases_dict
@doc false
def cpo_pattern_proxy_delete_normalise(cpo, dict \\ nil) do
cpo |> cpo_normalise(dict || @pattern_proxy_delete_dict_alias)
end
@pattern_proxy_get_kvs_alias [
@plymio_codi_key_alias_pattern,
@plymio_codi_key_alias_status,
@plymio_codi_key_alias_proxy_name,
@plymio_codi_key_alias_proxy_default,
@plymio_codi_key_alias_forms_edit
]
@pattern_proxy_get_dict_alias @pattern_proxy_get_kvs_alias
|> opts_create_aliases_dict
@doc false
def cpo_pattern_proxy_get_normalise(cpo, dict \\ nil) do
cpo |> cpo_normalise(dict || @pattern_proxy_get_dict_alias)
end
@doc false
def express_pattern(codi, pattern, opts)
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil}, pattern, _cpo)
when pattern == @plymio_codi_pattern_proxy_fetch and is_value_unset_or_nil(vekil) do
new_error_result(m: "vekil missing")
end
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil} = state, pattern, cpo)
when pattern == @plymio_codi_pattern_proxy_fetch do
with {:ok, cpo} <- cpo |> cpo_pattern_proxy_fetch_normalise,
{:ok, proxy_names} <- cpo |> opts_resolve_proxy_names,
{:ok, forms} <- vekil |> realise_proxy_fetch_forom(proxy_names),
{:ok, forms} <- forms |> forms_edit(cpo),
{:ok, cpo} <- cpo |> cpo_done_with_edited_form(forms) do
{:ok, {cpo, state}}
else
{:error, %{__exception__: true}} = result -> result
end
end
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil} = state, pattern, cpo)
when pattern == @plymio_codi_pattern_proxy_put and is_value_unset_or_nil(vekil) do
with {:ok, %Plymio.Vekil.Form{} = vekil} <- Plymio.Vekil.Form.new(),
{:ok, %CODI{} = state} <- state |> CODI.update_vekil(vekil),
{:ok, _} = result <- state |> express_pattern(pattern, cpo) do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil} = state, pattern, cpo)
when pattern == @plymio_codi_pattern_proxy_put do
with {:ok, cpo} <- cpo |> cpo_pattern_proxy_put_normalise,
{:ok, proxy_args} <- cpo |> cpo_get_proxy_args,
{:ok, vekil} <- vekil |> Plymio.Vekil.proxy_put(proxy_args),
{:ok, %CODI{} = state} <- state |> CODI.update_vekil(vekil) do
{:ok, {[], state}}
else
{:error, %{__exception__: true}} = result -> result
end
end
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil} = state, pattern, _cpo)
when pattern == @plymio_codi_pattern_proxy_delete and is_value_unset_or_nil(vekil) do
{:ok, {[], state}}
end
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil} = state, pattern, cpo)
when pattern == @plymio_codi_pattern_proxy_delete do
with {:ok, cpo} <- cpo |> cpo_pattern_proxy_delete_normalise,
{:ok, proxy_names} <- cpo |> opts_resolve_proxy_names,
{:ok, vekil} <- vekil |> Plymio.Vekil.proxy_delete(proxy_names),
{:ok, %CODI{} = state} <- state |> CODI.update_vekil(vekil) do
{:ok, {[], state}}
else
{:error, %{__exception__: true}} = result -> result
end
end
def express_pattern(%CODI{@plymio_codi_field_vekil => vekil} = state, pattern, cpo)
when pattern == @plymio_codi_pattern_proxy_get do
with {:ok, cpo} <- cpo |> cpo_pattern_proxy_get_normalise,
{:ok, proxy_names} <- cpo |> opts_resolve_proxy_names,
{:ok, default} <- cpo |> cpo_get_proxy_default,
{:ok, forms} <- vekil |> realise_proxy_get_forom(proxy_names, default),
{:ok, forms} <- forms |> forms_edit(cpo),
{:ok, cpo} <- cpo |> cpo_done_with_edited_form(forms) do
{:ok, {cpo, state}}
else
{:error, %{__exception__: true}} = result -> result
end
end
def express_pattern(_codi, pattern, opts) do
new_error_result(m: "proxy pattern #{inspect(pattern)} invalid", v: opts)
end
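  # `resolve_forom/2` normalises a `default` value into a *forom*: it is
  # passed through if it already is one, normalised via the *vekil* when one
  # is available, and otherwise treated as a form.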
defp resolve_forom(vekil, forom)
defp resolve_forom(state, forom) do
cond do
Plymio.Vekil.Utility.forom?(forom) ->
{:ok, {forom, state}}
Plymio.Vekil.Utility.vekil?(state) ->
state |> Plymio.Vekil.forom_normalise(forom)
# default is a form forom
true ->
with {:ok, forom} <- forom |> Plymio.Vekil.Forom.Form.normalise() do
{:ok, {forom, state}}
else
{:error, %{__exception__: true}} = result -> result
end
end
end
defp realise_proxy_fetch_forom(state, proxies) do
with {:ok, state} <- state |> Plymio.Vekil.Utility.validate_vekil(),
{:ok, {forom, _}} <- state |> Plymio.Vekil.proxy_fetch(proxies),
{:ok, {forms, _}} <- forom |> Plymio.Vekil.Forom.realise() do
{:ok, forms}
else
{:error, %{__exception__: true}} = result -> result
end
end
defp realise_proxy_get_forom(vekil, proxies, default)
defp realise_proxy_get_forom(state, proxies, default) when is_value_set(state) do
with {:ok, state} <- state |> Plymio.Vekil.Utility.validate_vekil(),
{:ok, {forom, _}} <- state |> Plymio.Vekil.proxy_get(proxies, default),
{:ok, {forms, _}} <- forom |> Plymio.Vekil.Forom.realise() do
{:ok, forms}
else
{:error, %{__exception__: true}} = result -> result
end
end
defp realise_proxy_get_forom(state, proxies, default) when is_value_unset_or_nil(state) do
default
|> is_value_set
|> case do
true ->
with {:ok, {forom, _}} <- state |> resolve_forom(default) do
# need to return as many forom as proxies but as a list forom
defaults = List.duplicate(forom, proxies |> List.wrap() |> length)
with {:ok, forom} <- defaults |> Plymio.Vekil.Utility.forom_reduce(),
{:ok, {forms, _}} <- forom |> Plymio.Vekil.Forom.realise() do
{:ok, forms}
else
{:error, %{__exception__: true}} = result -> result
end
else
{:error, %{__exception__: true}} = result -> result
end
# no vekil and no default => return no forms
_ ->
{:ok, []}
end
end
end | lib/codi/pattern/proxy/proxy.ex | 0.826116 | 0.403508 | proxy.ex | starcoder |
defmodule Livebook.LiveMarkdown.MarkdownHelpers do
@doc """
Reformats the given markdown document.
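
  For example, list markers and spacing are normalised (illustrative; exact
  normalisation depends on `EarmarkParser`):

      reformat("*   item")
      #=> "* item"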
"""
@spec reformat(String.t()) :: String.t()
def reformat(markdown) do
markdown
|> EarmarkParser.as_ast()
|> elem(1)
|> markdown_from_ast()
end
@doc """
Extracts plain text from the given AST ignoring all the tags.
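
  For example (illustrative):

      text_from_ast([{"p", [], ["Hello ", {"em", [], ["world"], %{}}], %{}}])
      #=> "Hello world"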
"""
@spec text_from_ast(EarmarkParser.ast()) :: String.t()
def text_from_ast(ast)
def text_from_ast(ast) when is_list(ast) do
ast
|> Enum.map(&text_from_ast/1)
|> Enum.join("")
end
def text_from_ast(ast) when is_binary(ast), do: ast
def text_from_ast({_, _, ast, _}), do: text_from_ast(ast)
@doc """
Renders Markdown string from the given `EarmarkParser` AST.
"""
@spec markdown_from_ast(EarmarkParser.ast()) :: String.t()
def markdown_from_ast(ast) do
build_md([], ast)
|> IO.iodata_to_binary()
|> String.trim()
end
defp build_md(iodata, ast)
defp build_md(iodata, []), do: iodata
defp build_md(iodata, [string | ast]) when is_binary(string) do
string
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{tag, attrs, lines, %{verbatim: true}} | ast]) do
render_html(tag, attrs, lines)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"em", _, content, %{}} | ast]) do
render_emphasis(content)
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"strong", _, content, %{}} | ast]) do
render_strong(content)
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"del", _, content, %{}} | ast]) do
render_strikethrough(content)
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"code", _, content, %{}} | ast]) do
render_code_inline(content)
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"a", attrs, content, %{}} | ast]) do
render_link(content, attrs)
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"img", attrs, [], %{}} | ast]) do
render_image(attrs)
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{:comment, _, lines, %{comment: true}} | ast]) do
render_comment(lines)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"hr", attrs, [], %{}} | ast]) do
render_ruler(attrs)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"br", _, [], %{}} | ast]) do
render_line_break()
|> append_inline(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"p", _, content, %{}} | ast]) do
render_paragraph(content)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"h" <> n, _, content, %{}} | ast])
when n in ["1", "2", "3", "4", "5", "6"] do
n = String.to_integer(n)
render_heading(n, content)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"pre", _, [{"code", attrs, [content], %{}}], %{}} | ast]) do
render_code_block(content, attrs)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"blockquote", [], content, %{}} | ast]) do
render_blockquote(content)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"table", _, content, %{}} | ast]) do
render_table(content)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"ul", _, content, %{}} | ast]) do
render_unordered_list(content)
|> append_block(iodata)
|> build_md(ast)
end
defp build_md(iodata, [{"ol", _, content, %{}} | ast]) do
render_ordered_list(content)
|> append_block(iodata)
|> build_md(ast)
end
defp append_inline(md, iodata), do: [iodata, md]
defp append_block(md, iodata), do: [iodata, "\n", md, "\n"]
# Renderers
# https://www.w3.org/TR/2011/WD-html-markup-20110113/syntax.html#void-element
@void_elements ~W(area base br col command embed hr img input keygen link meta param source track wbr)
defp render_html(tag, attrs, []) when tag in @void_elements do
"<#{tag} #{attrs_to_string(attrs)} />"
end
defp render_html(tag, attrs, lines) do
inner = Enum.join(lines, "\n")
"<#{tag} #{attrs_to_string(attrs)}>\n#{inner}\n</#{tag}>"
end
defp render_emphasis(content) do
inner = markdown_from_ast(content)
"*#{inner}*"
end
defp render_strong(content) do
inner = markdown_from_ast(content)
"**#{inner}**"
end
defp render_strikethrough(content) do
inner = markdown_from_ast(content)
"~~#{inner}~~"
end
defp render_code_inline(content) do
inner = markdown_from_ast(content)
"`#{inner}`"
end
defp render_link(content, attrs) do
caption = markdown_from_ast(content)
href = get_attr(attrs, "href", "")
"[#{caption}](#{href})"
end
defp render_image(attrs) do
alt = get_attr(attrs, "alt", "")
src = get_attr(attrs, "src", "")
title = get_attr(attrs, "title", "")
if title == "" do
"![#{alt}](#{src})"
else
~s/![#{alt}](#{src} "#{title}")/
end
end
defp render_comment([line]) do
line = String.trim(line)
"<!-- #{line} -->"
end
defp render_comment(lines) do
lines =
lines
|> Enum.drop_while(&blank?/1)
|> Enum.reverse()
|> Enum.drop_while(&blank?/1)
|> Enum.reverse()
Enum.join(["<!--" | lines] ++ ["-->"], "\n")
end
defp render_ruler(attrs) do
class = get_attr(attrs, "class", "thin")
case class do
"thin" -> "---"
"medium" -> "___"
"thick" -> "***"
end
end
defp render_line_break(), do: "\\\n"
defp render_paragraph(content), do: markdown_from_ast(content)
defp render_heading(n, content) do
title = markdown_from_ast(content)
String.duplicate("#", n) <> " " <> title
end
defp render_code_block(content, attrs) do
language = get_attr(attrs, "class", "")
"```#{language}\n#{content}\n```"
end
defp render_blockquote(content) do
inner = markdown_from_ast(content)
inner
|> String.split("\n")
|> Enum.map(&("> " <> &1))
|> Enum.join("\n")
end
defp render_table([{"thead", _, [head_row], %{}}, {"tbody", _, body_rows, %{}}]) do
alignments = alignments_from_row(head_row)
cell_grid = cell_grid_from_rows([head_row | body_rows])
column_widths = max_length_per_column(cell_grid)
[head_cells | body_cell_grid] = Enum.map(cell_grid, &pad_whitespace(&1, column_widths))
separator_cells = build_separator_cells(alignments, column_widths)
cell_grid_to_md_table([head_cells, separator_cells | body_cell_grid])
end
defp render_table([{"tbody", _, body_rows, %{}}]) do
cell_grid = cell_grid_from_rows(body_rows)
column_widths = max_length_per_column(cell_grid)
cell_grid = Enum.map(cell_grid, &pad_whitespace(&1, column_widths))
cell_grid_to_md_table(cell_grid)
end
defp cell_grid_from_rows(rows) do
Enum.map(rows, fn {"tr", _, columns, %{}} ->
Enum.map(columns, fn {tag, _, content, %{}} when tag in ["th", "td"] ->
markdown_from_ast(content)
end)
end)
end
defp alignments_from_row({"tr", _, columns, %{}}) do
Enum.map(columns, fn {tag, attrs, _, %{}} when tag in ["th", "td"] ->
style = get_attr(attrs, "style", nil)
case style do
"text-align: left;" -> :left
"text-align: center;" -> :center
"text-align: right;" -> :right
end
end)
end
defp build_separator_cells(alignments, widths) do
alignments
|> Enum.zip(widths)
|> Enum.map(fn
{:left, length} -> String.duplicate("-", length)
{:center, length} -> ":" <> String.duplicate("-", length - 2) <> ":"
{:right, length} -> String.duplicate("-", length - 1) <> ":"
end)
end
defp max_length_per_column(cell_grid) do
cell_grid
|> List.zip()
|> Enum.map(&Tuple.to_list/1)
|> Enum.map(fn cells ->
cells
|> Enum.map(&String.length/1)
|> Enum.max()
end)
end
defp pad_whitespace(cells, widths) do
cells
|> Enum.zip(widths)
|> Enum.map(fn {cell, width} ->
String.pad_trailing(cell, width, " ")
end)
end
defp cell_grid_to_md_table(cell_grid) do
cell_grid
|> Enum.map(fn cells ->
"| " <> Enum.join(cells, " | ") <> " |"
end)
|> Enum.join("\n")
end
defp render_unordered_list(content) do
marker_fun = fn _index -> "* " end
render_list(content, marker_fun, " ")
end
defp render_ordered_list(content) do
marker_fun = fn index -> "#{index + 1}. " end
render_list(content, marker_fun, " ")
end
defp render_list(items, marker_fun, indent) do
spaced? = spaced_list_items?(items)
item_separator = if(spaced?, do: "\n\n", else: "\n")
items
|> Enum.map(fn {"li", _, content, %{}} -> markdown_from_ast(content) end)
|> Enum.with_index()
|> Enum.map(fn {inner, index} ->
[first_line | lines] = String.split(inner, "\n")
first_line = marker_fun.(index) <> first_line
lines =
Enum.map(lines, fn
"" -> ""
line -> indent <> line
end)
Enum.join([first_line | lines], "\n")
end)
|> Enum.join(item_separator)
end
defp spaced_list_items?([{"li", _, [{"p", _, _content, %{}} | _], %{}} | _items]), do: true
defp spaced_list_items?([_ | items]), do: spaced_list_items?(items)
defp spaced_list_items?([]), do: false
# Helpers
defp get_attr(attrs, key, default) do
Enum.find_value(attrs, default, fn {attr_key, attr_value} ->
attr_key == key && attr_value
end)
end
defp attrs_to_string(attrs) do
attrs
|> Enum.map(fn {key, value} -> ~s/#{key}="#{value}"/ end)
|> Enum.join(" ")
end
defp blank?(string), do: String.trim(string) == ""
end | lib/livebook/live_markdown/markdown_helpers.ex | 0.706899 | 0.515498 | markdown_helpers.ex | starcoder |
defmodule Placid.Response.Rendering do
defmodule UnsupportedResponseTypeError do
@moduledoc """
Error raised when a rendering engine cannot be found
for a specified response content type.
"""
defexception [ :message ]
end
defmodule Engine do
@moduledoc false
@type data :: Keyword | Map | List
@type type :: binary
@type subtype :: binary
@callback serialize(data, type, subtype) :: { :ok, binary } | :next
end
@moduledoc """
`Placid.Response.Rendering` provides the ability for a response to be
automatically serialized based on its content-type.
## Engines
Rendering engines translate Elixir terms into a serialized format. Each engine
is responsible for a single type of content and is capable of rendering for
multiple mime types.
Rendering engines should implement callbacks for the following behaviour:
defmodule Placid.Response.Rendering.Engine do
@type data :: Keyword | Map | List
@type type :: binary
@type subtype :: binary
@callback serialize(data, type, subtype) :: { :ok, binary } | :next
end
Current, built-in implementations include:
* `Placid.Response.Rendering.Engines.JSON`
* `Placid.Response.Rendering.Engines.XML`
"""
alias Placid.Response.Rendering.Engines
@unsent [ :unset, :set ]
@engines [ Engines.JSON,
Engines.XML ]
@doc """
Serializes `data` when an available rendering engine exists for the given
`content_type`.
## Arguments
* `conn` - `Plug.Conn` - the current connection
* `data` - `List` | `Map` | `Struct` - Elixir terms to be serialized
* `content_type` - `String` - response content-type
## Returns
`Plug.Conn`
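
  ## Example

  A sketch, assuming the built-in JSON engine is enabled for `application/json`:

      conn = serialize_to_body(conn, %{id: 1}, "application/json")
      conn.resp_body
      #=> ~s({"id":1})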
"""
def serialize_to_body(%Plug.Conn{ state: state } = conn, data, accept) when state in @unsent do
engines = Application.get_env(:placid, :rendering_engines, @engines)
# Extract media types
resp = Plug.Conn.Utils.list(accept) |> Enum.map(&Plug.Conn.Utils.media_type/1) |>
# Remove errors
Enum.filter(fn :error -> false
_ -> true end) |>
      # Sort by quality; `q` may be an integer string such as "1", which
      # `String.to_float/1` rejects, so parse it with `Float.parse/1`
      Enum.sort_by(fn
                     {:ok, _type, _subtype, %{"q" => q}} ->
                       case Float.parse(q) do
                         {quality, _rest} -> quality
                         :error -> 0
                       end
                     {:ok, _type, _subtype, %{}} -> 1
                     :error -> 0
                   end) |>
# Descending order
Enum.reverse |>
# Attempt rendering with a matching engine
reduce_types(data, engines)
if is_nil(resp) do
raise UnsupportedResponseTypeError, message: "unsupported media type #{accept}"
else
{type, subtype, body} = resp
%Plug.Conn{ conn | resp_body: body, state: :set } |> Plug.Conn.put_resp_content_type("#{type}/#{subtype}")
end
end
def serialize_to_body(conn, _data, _ct), do: conn
defp reduce_types([{:ok, type, subtype, _params}|types], data, engines) do
case reduce(engines, data, type, subtype) do
nil ->
reduce_types(types, data, engines)
{_type, _subtype_, _body} = resp ->
resp
end
end
defp reduce_types([], _data, _engines) do
nil
end
defp reduce([engine|engines], data, type, subtype) do
case engine.serialize(data, type, subtype) do
{ :ok, body } ->
{type, subtype} = engine.normalize_content_type(type, subtype)
{type, subtype, body}
:next -> reduce engines, data, type, subtype
end
end
defp reduce([], _, _type, _subtype) do
nil
end
end | lib/placid/response/rendering.ex | 0.899114 | 0.471892 | rendering.ex | starcoder |
defmodule Prometheus.Metric.Counter do
@moduledoc """
Counter is a Metric that represents a single numerical value that only ever
goes up. That implies that it cannot be used to count items whose number can
also go down, e.g. the number of currently running processes. Those
"counters" are represented by `Prometheus.Metric.Gauge`.
A Counter is typically used to count requests served, tasks completed, errors
occurred, etc.
Example use cases for Counters:
- Number of requests processed;
- Number of items that were inserted into a queue;
- Total amount of data that a system has processed.
Use the [`rate()`](https://prometheus.io/docs/querying/functions/#rate())/
[`irate()`](https://prometheus.io/docs/querying/functions/#irate())
functions in Prometheus to calculate the rate of increase of a Counter.
By convention, the names of Counters are suffixed by `_total`.
To create a counter use either `new/1` or `declare/1`; the difference is that
`new/1` will raise a `Prometheus.MFAlreadyExistsError` exception if a counter with
the same `registry`, `name` and `labels` combination already exists.
Both accept `spec` `Keyword` with the same set of keys:
- `:registry` - optional, default is `:default`;
- `:name` - required, can be an atom or a string;
- `:help` - required, must be a string;
- `:labels` - optional, default is `[]`.
Example:
```
defmodule MyServiceInstrumenter do
use Prometheus.Metric
## to be called at app/supervisor startup.
## to tolerate restarts use declare.
def setup() do
Counter.declare([name: :my_service_requests_total,
help: "Requests count.",
labels: [:caller]])
end
def inc(caller) do
Counter.inc([name: :my_service_requests_total,
labels: [caller]])
end
end
```
"""
use Prometheus.Erlang, :prometheus_counter
@doc """
Creates a counter using `spec`.
Raises `Prometheus.MissingMetricSpecKeyError` if required `spec` key is missing.<br>
Raises `Prometheus.InvalidMetricNameError` if metric name is invalid.<br>
Raises `Prometheus.InvalidMetricHelpError` if help is invalid.<br>
Raises `Prometheus.InvalidMetricLabelsError` if labels isn't a list.<br>
Raises `Prometheus.InvalidLabelNameError` if label name is invalid.<br>
Raises `Prometheus.MFAlreadyExistsError` if a counter with
the same `spec` already exists.
"""
delegate new(spec)
@doc """
Creates a counter using `spec`.
If a counter with the same `spec` exists returns `false`.
Raises `Prometheus.MissingMetricSpecKeyError` if required `spec` key is missing.<br>
Raises `Prometheus.InvalidMetricNameError` if metric name is invalid.<br>
Raises `Prometheus.InvalidMetricHelpError` if help is invalid.<br>
Raises `Prometheus.InvalidMetricLabelsError` if labels isn't a list.<br>
Raises `Prometheus.InvalidLabelNameError` if label name is invalid.
"""
delegate declare(spec)
@doc """
Increments the counter identified by `spec` by `value`.
Raises `Prometheus.InvalidValueError` exception if `value` isn't a positive number.<br>
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric inc(spec, value \\ 1)
@doc """
Increments the counter identified by `spec` by 1 when `body` executed.
Read more about bodies: `Prometheus.Injector`.
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
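
  Example (a sketch, assuming a counter `:my_service_requests_total` was declared
  without labels; `handle_request/0` is a placeholder):

  ```
  Counter.count [name: :my_service_requests_total] do
    handle_request()
  end
  ```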
"""
defmacro count(spec, body) do
env = __CALLER__
Prometheus.Injector.inject(
fn block ->
quote do
Prometheus.Metric.Counter.inc(unquote(spec), 1)
unquote(block)
end
end,
env,
body
)
end
@doc """
Increments the counter identified by `spec` by 1 when `body` raises `exception`.
Read more about bodies: `Prometheus.Injector`.
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
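
  Example (a sketch, counting only `ArithmeticError`s raised by the placeholder
  `compute/0`):

  ```
  Counter.count_exceptions [name: :my_service_errors_total], ArithmeticError do
    compute()
  end
  ```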
"""
defmacro count_exceptions(spec, exception \\ :_, body) do
env = __CALLER__
Prometheus.Injector.inject(
fn block ->
quote do
require Prometheus.Error
Prometheus.Error.with_prometheus_error(
try do
unquote(block)
rescue
e in unquote(exception) ->
stacktrace =
unquote(
if macro_exported?(Kernel.SpecialForms, :__STACKTRACE__, 0) do
quote(do: __STACKTRACE__)
else
quote(do: System.stacktrace())
end
)
{registry, name, labels} = Prometheus.Metric.parse_spec(unquote(spec))
:prometheus_counter.inc(registry, name, labels, 1)
reraise(e, stacktrace)
end
)
end
end,
env,
body
)
end
@doc """
Increments the counter identified by `spec` by 1 when `body` raises no exceptions.
Read more about bodies: `Prometheus.Injector`.
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
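
  Example (a sketch, incrementing only when the placeholder `handle_request/0`
  returns without raising):

  ```
  Counter.count_no_exceptions [name: :my_service_requests_total] do
    handle_request()
  end
  ```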
"""
defmacro count_no_exceptions(spec, body) do
env = __CALLER__
Prometheus.Injector.inject(
fn block ->
quote do
require Prometheus.Error
value = unquote(block)
Prometheus.Metric.Counter.inc(unquote(spec), 1)
value
end
end,
env,
body
)
end
@doc """
Removes counter series identified by spec.
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric remove(spec)
@doc """
Resets the value of the counter identified by `spec`.
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric reset(spec)
@doc """
Returns the value of the counter identified by `spec`. If there is no counter for
given labels combination, returns `:undefined`.
Raises `Prometheus.UnknownMetricError` exception if a counter
for `spec` can't be found.<br>
Raises `Prometheus.InvalidMetricArityError` exception if labels count mismatch.
"""
delegate_metric value(spec)
end | astreu/deps/prometheus_ex/lib/prometheus/metric/counter.ex | 0.915224 | 0.901314 | counter.ex | starcoder |
defmodule Nookal.Utils do
@moduledoc false
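  # Walks a list of `{struct_field, payload_key, type}` specs, fetching each
  # key from `payload`, casting its value, and writing it into `acc`
  # (typically a struct being built up).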
def extract_fields([{field, key, type} | rest], payload, acc) do
case Map.fetch(payload, key) do
{:ok, value} ->
with {:ok, cast_value} <- cast(value, type) do
extract_fields(rest, payload, %{acc | field => cast_value})
end
:error ->
{:error, "could not fetch #{inspect(key)} in payload"}
end
end
def extract_fields([], _payload, acc) do
{:ok, acc}
end
def cast(nil, _type), do: {:ok, nil}
def cast(values, {:list, type}) when is_list(values) do
with {:error, _reason} <- all_or_none_map(values, &cast(&1, type)) do
cast_error(values, {:list, type})
end
end
def cast(value, :string) when is_binary(value) do
{:ok, value}
end
def cast(value, :integer) when is_binary(value) do
case Integer.parse(value) do
{integer, ""} -> {:ok, integer}
_other -> cast_error(value, :integer)
end
end
def cast(value, :integer) when is_integer(value) do
{:ok, value}
end
def cast(value, :date) when is_binary(value) do
with {:error, _reason} <- Date.from_iso8601(value) do
cast_error(value, :date)
end
end
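  # Date-times arrive as "YYYY-MM-DD hh:mm:ss"; unparsable values are mapped
  # to `{:ok, nil}` rather than a cast error.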
def cast(value, :naive_date_time) when is_binary(value) do
with [date, time] <- String.split(value, " "),
{:ok, date} <- Date.from_iso8601(date),
{:ok, time} <- Time.from_iso8601(time),
{:ok, naive_datetime} <- NaiveDateTime.new(date, time) do
{:ok, naive_datetime}
else
_ -> {:ok, nil}
end
end
def cast(value, :time) when is_binary(value) do
with {:error, _reason} <- Time.from_iso8601(value) do
cast_error(value, :time)
end
end
def cast(value, :boolean) when is_binary(value) do
case value do
"1" -> {:ok, true}
"0" -> {:ok, false}
_other -> cast_error(value, :boolean)
end
end
def cast(value, :original) do
{:ok, value}
end
def cast(value, type), do: cast_error(value, type)
@compile {:inline, [cast_error: 2]}
defp cast_error(value, type) do
{:error, "could not cast #{inspect(value)} to #{inspect(type)}"}
end
def all_or_none_map(enumerables, fun) do
result =
Enum.reduce_while(enumerables, [], fn element, mapped ->
case fun.(element) do
{:ok, element} -> {:cont, [element | mapped]}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
case result do
{:error, reason} -> {:error, reason}
mapped -> {:ok, Enum.reverse(mapped)}
end
end
end | lib/nookal/utils.ex | 0.775817 | 0.400779 | utils.ex | starcoder |
defmodule LiveElement.Renderer do
@moduledoc false
defmacro __before_compile__(env) do
render? = Module.defines?(env.module, {:render, 1})
root = Path.dirname(env.file)
filename = template_filename(env)
templates = Phoenix.Template.find_all(root, filename)
case {render?, templates} do
{true, [template | _]} ->
IO.warn(
"ignoring template #{inspect(template)} because the LiveView " <>
"#{inspect(env.module)} defines a render/1 function",
Macro.Env.stacktrace(env)
)
:ok
{true, []} ->
:ok
{false, [template]} ->
ext = template |> Path.extname() |> String.trim_leading(".") |> String.to_atom()
engine = Map.fetch!(Phoenix.Template.engines(), ext)
ast = engine.compile(template, filename)
quote do
@file unquote(template)
@external_resource unquote(template)
def render(var!(assigns)) when is_map(var!(assigns)) do
unquote(ast)
end
end
{false, [_ | _]} ->
IO.warn(
"multiple templates were found for #{inspect(env.module)}: #{inspect(templates)}",
Macro.Env.stacktrace(env)
)
:ok
{false, []} ->
template = Path.join(root, filename <> ".heex")
message = ~s'''
render/1 was not implemented for #{inspect(env.module)}.
Make sure to either explicitly define a render/1 clause with a LiveView template:
def render(assigns) do
~H"""
...
"""
end
Or create a file at #{inspect(template)} with the LiveView template.
'''
IO.warn(message, Macro.Env.stacktrace(env))
quote do
@external_resource unquote(template)
def render(_assigns) do
raise unquote(message)
end
end
end
end
defp template_filename(env) do
env.module
|> Module.split()
|> List.last()
|> Macro.underscore()
|> Kernel.<>(".html")
end
end | lib/live_element/renderer.ex | 0.5794 | 0.412471 | renderer.ex | starcoder |
defmodule AdventOfCode.Solutions.Day03 do
@moduledoc """
Solution for day 3 exercise.
### Exercise
https://adventofcode.com/2021/day/3
"""
require Logger
alias AdventOfCode.Utils
def energy_consumption(filename) do
[gamma, epsilon] =
filename
|> File.read!()
|> parse_file()
|> calculate_energy_indicators()
result = values_to_integer(gamma) * values_to_integer(epsilon)
IO.puts("Energy consumption of #{result}")
end
def life_support_rating(filename) do
[o2_generator, co2_scrubber] =
filename
|> File.read!()
|> parse_file()
|> calculate_life_support_indicators()
result = values_to_integer(o2_generator) * values_to_integer(co2_scrubber)
IO.puts("Life support rating of #{result}")
end
defp parse_file(file_content) do
file_content
|> String.replace("\r\n", "\n")
|> String.split("\n")
|> Enum.reject(&(&1 == ""))
|> Enum.map(&String.graphemes/1)
end
defp calculate_energy_indicators(values) do
transposed_values = Utils.transpose_matrix(values)
gamma = Enum.map(transposed_values, &most_frequent_value/1)
epsilon = Enum.map(gamma, &opposite_binary/1)
[gamma, epsilon]
end
defp calculate_life_support_indicators(values) do
o2_generator = calculate_life_indicator(values, :o2_generator)
co2_scrubber = calculate_life_indicator(values, :co2_scrubber)
[o2_generator, co2_scrubber]
end
defp calculate_life_indicator(values, mode, position \\ 0)
defp calculate_life_indicator([value], _mode, _position), do: value
defp calculate_life_indicator(values, mode, position) do
transposed_values = Utils.transpose_matrix(values)
filtering_value =
transposed_values
|> Enum.at(position)
|> most_frequent_value()
filtering_value =
case mode do
:o2_generator -> filtering_value
:co2_scrubber -> opposite_binary(filtering_value)
end
filtered_values = Enum.reject(values, &(Enum.at(&1, position) != filtering_value))
calculate_life_indicator(filtered_values, mode, position + 1)
end
defp values_to_integer(values) do
values
|> Enum.join()
|> Integer.parse(2)
|> elem(0)
end
defp opposite_binary("0"), do: "1"
defp opposite_binary("1"), do: "0"
  defp most_frequent_value(list) do
    list
    |> Enum.frequencies()
    |> case do
      %{"0" => zero_occ, "1" => one_occ} when zero_occ > one_occ -> "0"
      # a column containing only "0"s has no "1" key in the frequencies map
      %{"0" => _} = frequencies when map_size(frequencies) == 1 -> "0"
      _ -> "1"
    end
  end
end | lib/advent_of_code/solutions/day03.ex | 0.731346 | 0.540803 | day03.ex | starcoder |
defmodule JOSEVirtualHSM do
@moduledoc """
Virtual JOSE HSM for signing JWSes and decrypting JWEs
It is a virtual HSM in the sense that the private keys used for signing and decrypting are not
available to other processes, and are particularly protected against leaking:
- they are stored in a private ETS table
- processes dealing with these keys are marked as sensitive
- keys loaded from the disk or the environment can be deleted after loading (ideal for
container deployment)
Other features include:
- keys can be generated automatically given a specification, so that there is **no secret to
handle** during deployment
- it is automatically clustered: any node can sign a JWS or decrypt a JWE with any key
in the cluster. Nodes don't share keys (they can't) but can work with each other in a
transparent fashion
- key ID is automatically generated using
[RFC7638 - JSON Web Key (JWK) Thumbprint](https://tools.ietf.org/html/rfc7638)
- however, note that due to a limitation in the underlying `JOSE` library, JWSes do not
include the kid in their header
## Launching `JOSEVirtualHSM`
`JOSEVirtualHSM` is a `GenServer` that must be launched in a supervised manner at application
startup. In your `app/application.ex` file, add:
children = [
JOSEVirtualHSM
]
or
children = [
{JOSEVirtualHSM, opts...}
]
where `opts` is a `Keyword` to the list of children.
## Options
- `:delete_on_load`: when loading a private key from a file or the environment, this
option, when set to `true`, deletes the key after loading. Defaults to `true`
- `:keys`: the list of keys to load. See `t:key_load_specs/0` for the different methods
to load keys
## Environment options
The key specification can also be retrieved from the environment options:
`config/config.exs`
config :jose_virtual_hsm, :keys, [
{:auto_gen, {:ec, "P-256"}, %{"use" => "sig"}},
{:auto_gen, {:rsa, 2048}, %{"use" => "sig"}},
{:auto_gen, {:okp, :Ed25519}, %{"use" => "sig"}},
{:auto_gen, {:ec, "P-256"}, %{"use" => "enc"}},
{:auto_gen, {:rsa, 2048}, %{"use" => "enc"}},
{:auto_gen, {:okp, :X25519}, %{"use" => "enc"}}
]
This key specification is used in the following examples.
## Example
### Retrieving public keys
iex> JOSEVirtualHSM.public_keys()
[
%{
"crv" => "X25519",
"kid" => "<KEY>",
"kty" => "OKP",
"use" => "enc",
"x" => "<KEY>"
},
%{
"crv" => "Ed25519",
"kid" => "<KEY>",
"kty" => "OKP",
"use" => "sig",
"x" => "<KEY>"
},
%{
"e" => "AQAB",
"kid" => "<KEY>",
"kty" => "RSA",
"n" => "<KEY>",
"use" => "sig"
},
%{
"e" => "AQAB",
"kid" => "<KEY>",
"kty" => "RSA",
"n" => "<KEY>",
"use" => "enc"
},
%{
"crv" => "P-256",
"kid" => "<KEY>",
"kty" => "EC",
"use" => "enc",
"x" => "jopq4PgS4w9721MwJppxw7niV-1zqgtBd-JeVWPuBcU",
"y" => "Eo1xbm0g5AsB8GSiXKHRynXH2OwRcMO9i-6PTi-k-GE"
},
%{
"crv" => "P-256",
"kid" => "<KEY>",
"kty" => "EC",
"use" => "sig",
"x" => "<KEY>",
"y" => "<KEY>"
}
]
These public keys can obviously be shared with third parties. They can be used:
- to verify signature of a JWS signed by `JOSEVirtualHSM`
- to encrypt a JWE to be sent to the server using `JOSEVirtualHSM`
`JOSEVirtualHSM` doesn't support JWS verification and JWE encryption. For that, use
`JOSE` or `JOSEUtils` instead.
### Signing:
iex> JOSEVirtualHSM.sign(%{"hello" => "world"})
{:ok,
{"<KEY>",
%{
"e" => "AQAB",
"kid" => "<KEY>",
"kty" => "RSA",
"n" => "<KEY>",
"use" => "sig"
}}}
iex> JOSEVirtualHSM.sign(%{"hello" => "world"})
{:ok,
{"<KEY>",
%{
"crv" => "Ed25519",
"kid" => "<KEY>",
"kty" => "OKP",
"use" => "sig",
"x" => "<KEY>"
}}}
iex> JOSEVirtualHSM.sign(%{"hello" => "world"})
{:ok,
{"<KEY>",
%{
"crv" => "Ed25519",
"kid" => "<KEY>",
"kty" => "OKP",
"use" => "sig",
"x" => "<KEY>"
}}}
iex> JOSEVirtualHSM.sign(%{"hello" => "world"})
{:ok,
{"<KEY>",
%{
"crv" => "P-256",
"kid" => "<KEY>",
"kty" => "EC",
"use" => "sig",
"x" => "<KEY>",
"y" => "<KEY>"
}}}
Notice how keys where chosen randomly from all the available keys. `JOSEVirtualHSM` always
prefers keys on local node, when available. It's possible to specify how to sign using
`t:JOSEUtils.JWK.key_selector/0`:
iex> JOSEVirtualHSM.sign(%{"hello" => "world"}, alg: ["ES256", "ES384", "ES512"])
{:ok,
{"<KEY>",
%{
"crv" => "P-256",
"kid" => "<KEY>",
"kty" => "EC",
"use" => "sig",
"x" => "<KEY>",
"y" => "<KEY>"
}}}
iex> JOSEVirtualHSM.sign(%{"hello" => "world"}, kty: "OKP")
{:ok,
{"<KEY>",
%{
"crv" => "Ed25519",
"kid" => "<KEY>",
"kty" => "OKP",
"use" => "sig",
"x" => "<KEY>"
}}}
### Decryption
With RSA:
iex> jwk_pub = JOSEVirtualHSM.public_keys() |> Enum.find(&(&1["kty"] == "RSA" and &1["use"] == "enc"))
%{
"e" => "AQAB",
"kid" => "<KEY>",
"kty" => "RSA",
"n" => "<KEY>ZBs39-hCU92EtA7CS0IQ_rvvAfvlLV3T-tjQ",
"use" => "enc"
}
iex> jwe = JOSEUtils.JWE.encrypt!(%{"very" => "secret"}, jwk_pub, "RSA-OAEP", "A128GCM")
"<KEY>"
iex> JOSEVirtualHSM.decrypt(jwe)
{:ok,
{"{\\"very\\":\\"secret\\"}",
%{
"e" => "AQAB",
"kid" => "<KEY>",
"kty" => "RSA",
"n" => "5GX4GERxJ2rV-w5T2G00D3-HLEriX<KEY>",
"use" => "enc"
}}}
With ECDH-ES:
iex> jwk_pub = JOSEVirtualHSM.public_keys() |> Enum.find(&(&1["kty"] == "EC" and &1["use"] == "enc"))
%{
"crv" => "P-256",
"kid" => "<KEY>",
"kty" => "EC",
"use" => "enc",
"x" => "<KEY>",
"y" => "<KEY>"
}
iex> my_jwk_priv = JOSE.JWK.generate_key({:ec, "P-256"}) |> JOSE.JWK.to_map() |> elem(1)
%{
"crv" => "P-256",
"d" => "TsfNgJq_UEWdf0rqp2W5OQJQMbtANMMWwguNO4VrZkM",
"kty" => "EC",
"x" => "UIZ5br7q2li5NzcZePOiK4Wi3jV4xATVT4Yie8xMRT8",
"y" => "eiLF2EUWFbPX2MTchz_h-VbiEjnJ9koB-6kVqWF3kBo"
}
iex> jwe = JOSEUtils.JWE.encrypt!(%{"very" => "secret"}, {jwk_pub, my_jwk_priv}, "ECDH-ES", "A128GCM")
"<KEY>..16AhXI2qu9cw7A6e.dG_TaBdpAJHgR962LxThdWo.uBtZ3N55sztIRgCFwzC5hw"
iex> JOSEVirtualHSM.decrypt(jwe)
{:ok,
{"{\\"very\\":\\"secret\\"}",
%{
"crv" => "P-256",
"kid" => "ltu_BZFFssJhqlTdKvf3VWu7z9dFKhwFxXSx8Q-bpw4",
"kty" => "EC",
"use" => "enc",
"x" => "jopq4PgS4w9721MwJppxw7niV-1zqgtBd-JeVWPuBcU",
"y" => "Eo1xbm0g5AsB8GSiXKHRynXH2OwRcMO9i-6PTi-k-GE"
}}}
## Clustering
The `JOSEVirtualHSM` of the current node listens to other `JOSEVirtualHSM` on joining and
leaving of other nodes, and registers their public keys and deletes them when needed.
This is based on BEAM distribution. Other distribution methods (such as using Redis as an
intermediary) are **not** supported.
## Architecture
Each node runs its own instance of `JOSEVirtualHSM`, which is a `GenServer`. This
`GenServer` has the following roles:
- on startup, it loads the keys from the key specification
- it stores local private keys in a private ETS
- it listens for joining and leaving nodes to gain knowledge of available keys
When an operation is requested for a local key, the local instance of `JOSEVirtualHSM`
launches a worker process and sends it the required private keys to perform the signing or
decryption operation. This process is in charge of:
- performing the signing or decryption operation
- answer to the original process
The `JOSEVirtualHSM` instance keeps track of the launched process and responds with an error
to the calling process if the worker process died in an abnormal manner.
The number of worker processes is **not** limited. No queueing or pooling method is
implemented. As a consequence, a server could become unresponsive and overwhelmed should too
many signing or decryption requests arrive at the same time. Any PR implementing it is
welcome :)
"""
# records stored in this table are of the form:
# {kid, jwk_pub, [nodes]}
@pub_keys_tab Module.concat(__MODULE__, PublicKeys)
@enc_ecdh_algs ["ECDH-ES", "ECDH-ES+A128KW", "ECDH-ES+A192KW", "ECDH-ES+A256KW"]
use GenServer
alias JOSEVirtualHSM.{
DecryptionError,
NoSuitableKeyFoundError,
Worker,
WorkerError
}
require Logger
@type key_fields :: %{optional(String.t()) => any()}
@type key_load_specs :: [key_load_spec()]
@type key_load_spec ::
{:auto_gen, {:ec, curve :: String.t()}}
| {:auto_gen, {:ec, curve :: String.t()}, key_fields()}
| {:auto_gen, {:okp, :Ed25519 | :Ed448 | :X25519 | :X448}}
| {:auto_gen, {:okp, :Ed25519 | :Ed448 | :X25519 | :X448}, key_fields()}
| {:auto_gen, {:rsa, modulus_size :: non_neg_integer()}}
| {:auto_gen, {:rsa, modulus_size :: non_neg_integer()}, key_fields()}
| {:pem_file, Path.t()}
| {:pem_file, Path.t(), key_fields()}
| {:der_file, Path.t()}
| {:der_file, Path.t(), key_fields()}
| {:pem_env, var_name :: String.t()}
| {:pem_env, var_name :: String.t(), key_fields()}
| {:der_env, var_name :: String.t()}
| {:der_env, var_name :: String.t(), key_fields()}
| {:map_env, var_name :: String.t()}
@doc """
Starts a supervised JOSE virtual HSM
## Options
- `:delete_on_load`: deletes the file or environment variable of a key after loading it.
Boolean, defaults to `true`
- `:keys`: the list of keys to load. See `t:key_load_specs/0` for the different methods
to load keys
"""
def start_link(opts) do
opts =
opts
|> Enum.into(%{})
|> Map.put_new(:delete_on_load, true)
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@doc """
Return the registered public keys
## Options
- `:local_only`: when set to `true`, only returns the local keys, not those registered from other
nodes. Defaults to `false`
"""
@spec public_keys(Keyword.t()) :: [JOSEUtils.JWK.t()]
def public_keys(opts \\ []) do
filter_fun =
if opts[:local_only] == true do
fn {_kid, _jwk_pub, nodes} -> node() in nodes end
else
fn _ -> true end
end
@pub_keys_tab
|> :ets.tab2list()
|> Enum.filter(filter_fun)
|> Enum.map(fn {_kid, jwk_pub, _nodes} -> jwk_pub end)
end
@doc """
Signs a message with one of the available signing keys
If the payload is a string, it is signed as is. Otherwise, it is encoded to a string using
`Jason.encode/1`. Example:
JOSEVirtualHSM.sign(%{"Hello" => "Tomo"})
The second parameter can be used to further specify which type of key to use:
JOSEVirtualHSM.sign(%{"Hello" => "Tomo"}, kty: "RSA")
JOSEVirtualHSM.sign(%{"Hello" => "Tomo"}, crv: "P-256")
JOSEVirtualHSM.sign(%{"Hello" => "Tomo"}, alg: ["EdDSA", "RS512"])
and can be used to use a specific key as well:
JOSEVirtualHSM.sign(%{"Hello" => "Tomo"}, kid: ""<KEY>"")
When more than one key is available for signing, one is chosen randomly. Don't be
surprised if signing returns JWSes signed with different algorithms!
"""
@spec sign(
payload :: String.t() | any(),
JOSEUtils.JWK.key_selector(),
timeout :: non_neg_integer()
) :: {:ok, {signed_payload :: String.t(), JOSEUtils.JWK.t()}} | {:error, Exception.t()}
def sign(payload, key_selector \\ [], timeout \\ 30_000)
def sign(<<_::binary>> = payload, key_selector, timeout) do
key_selector =
key_selector
|> Keyword.put(:use, "sig")
|> Keyword.put(:key_ops, "sign")
@pub_keys_tab
|> :ets.tab2list()
|> Enum.filter(&JOSEUtils.JWK.match_key_selector?(elem(&1, 1), key_selector))
|> Enum.split_with(fn {_, _, nodes} -> node() in nodes end)
|> case do
{[], []} ->
{:error, %NoSuitableKeyFoundError{}}
{[_ | _] = local_keys, _remote_keys} ->
{kid, _jwk, _nodes} = Enum.random(local_keys)
GenServer.call(JOSEVirtualHSM, {:sign, kid, key_selector[:alg], payload}, timeout)
{[], remote_keys} ->
{kid, _jwk, target_nodes} = Enum.random(remote_keys)
GenServer.call(
{JOSEVirtualHSM, Enum.random(target_nodes)},
{:sign, kid, key_selector[:alg], payload},
timeout
)
end
rescue
e ->
{:error, e}
end
def sign(payload, key_selector, timeout) do
with {:ok, payload} <- Jason.encode(payload) do
sign(payload, key_selector, timeout)
end
end
@doc """
Decrypts a JWE encrypted with a public key of `JOSEVirtualHSM`
As the encryption key can be located on any node running `JOSEVirtualHSM` in the cluster,
this function:
- retains only the keys that could have been used for encryption
- tries decrypting the JWE sequentially on each possible node (it does not try to decrypt in
parallel, for performance reasons: this would overload the `JOSEVirtualHSM` instances)
For instance:
JOSEVirtualHSM.decrypt(jwe)
This function determines automatically the algorithms in use from the JWE header. The second
parameter may be used to further select specific keys:
JOSEVirtualHSM.decrypt(jwe, kid: "iBRaf9ugUtDUe2i2cAY9i4N315O6f_cSNeEEDi9wuQY")
"""
@spec decrypt(
jwe :: JOSEUtils.JWE.serialized(),
JOSEUtils.JWK.key_selector(),
timeout :: non_neg_integer()
) ::
{:ok, {decrypted_content :: String.t(), JOSEUtils.JWK.t()}} | {:error, Exception.t()}
def decrypt(<<_::binary>> = jwe, key_selector \\ [], timeout \\ 30_000) do
with {:ok, %{"alg" => alg, "enc" => enc} = jwe_header} <- JOSEUtils.JWE.peek_header(jwe) do
key_selector = decrypt_update_key_selector_from_jwe_header(key_selector, jwe_header)
all_keys = :ets.tab2list(@pub_keys_tab)
suitable_kids =
all_keys
|> Enum.map(fn {_kid, jwk, _nodes} -> jwk end)
|> JOSEUtils.JWKS.decryption_keys()
|> Enum.filter(&JOSEUtils.JWK.match_key_selector?(&1, alg: alg, enc: enc))
|> Enum.filter(&JOSEUtils.JWK.match_key_selector?(&1, key_selector))
|> Enum.map(& &1["kid"])
suitable_kids_and_nodes =
for kid <- suitable_kids do
{_kid, _jwk, nodes} =
Enum.find(all_keys, fn
{^kid, _, _} -> true
_ -> false
end)
{kid, nodes}
end
case suitable_kids_and_nodes do
[_ | _] ->
node_key_mapping =
Enum.reduce(suitable_kids_and_nodes, %{}, fn {kid, nodes}, acc ->
case Enum.find(nodes, fn node -> node in Map.keys(acc) end) do
nil ->
if node() in nodes,
do: Map.put(acc, node(), [kid]),
else: Map.put(acc, List.first(nodes), [kid])
node ->
Map.update!(acc, node, &[kid | &1])
end
end)
do_decrypt(jwe, Enum.into(node_key_mapping, []), alg, enc, timeout)
[] ->
{:error, %NoSuitableKeyFoundError{}}
end
end
end
defp decrypt_update_key_selector_from_jwe_header(key_selector, %{"epk" => epk}) do
key_selector =
case epk do
%{"kty" => kty} ->
Keyword.put(key_selector, :kty, kty)
_ ->
key_selector
end
case epk do
%{"crv" => crv} ->
Keyword.put(key_selector, :crv, crv)
_ ->
key_selector
end
end
defp decrypt_update_key_selector_from_jwe_header(key_selector, _) do
key_selector
end
defp do_decrypt(_jwe, [], _alg_or_algs_or_nil, _enc_or_encs_or_nil, _timeout) do
{:error, %DecryptionError{}}
end
defp do_decrypt(jwe, [{node, kids} | rest], alg, enc, timeout) do
case GenServer.call({JOSEVirtualHSM, node}, {:decrypt, jwe, kids, alg, enc}, timeout) do
{:ok, _} = result ->
result
{:error, _} ->
do_decrypt(jwe, rest, alg, enc, timeout)
end
end
@doc """
Encrypts a payload with an ECDH algorithm
The JWK parameter is the public JWK of the recipient of the JWE, which is also the returned
JWK.
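
  ## Example

  A sketch, assuming `recipient_jwk` is the recipient's public EC key and a
  matching encryption key is registered in the cluster:

      {:ok, {jwe, jwk_pub}} =
        JOSEVirtualHSM.encrypt_ecdh(%{"very" => "secret"}, recipient_jwk, "ECDH-ES", "A128GCM")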
"""
@spec encrypt_ecdh(
payload :: any(),
jwk :: JOSEUtils.JWK.t(),
enc_alg :: JOSEUtils.JWA.enc_alg(),
enc_enc :: JOSEUtils.JWA.enc_enc(),
timeout :: non_neg_integer()
) :: {:ok, {JOSEUtils.JWE.serialized(), JOSEUtils.JWK.t()}} | {:error, Exception.t()}
def encrypt_ecdh(payload, jwk, enc_alg, enc_enc, timeout \\ 30_000)
def encrypt_ecdh(<<_::binary>> = payload, jwk, enc_alg, enc_enc, timeout)
when enc_alg in @enc_ecdh_algs do
key_selector = [alg: enc_alg, enc: enc_enc, kty: jwk["kty"], crv: jwk["crv"]]
all_keys = :ets.tab2list(@pub_keys_tab)
suitable_kids =
all_keys
|> Enum.map(fn {_kid, jwk, _nodes} -> jwk end)
|> JOSEUtils.JWKS.encryption_keys()
|> Enum.filter(&JOSEUtils.JWK.match_key_selector?(&1, key_selector))
|> Enum.map(& &1["kid"])
case suitable_kids do
[kid | _] ->
{_, _, nodes} =
Enum.find(all_keys, fn
{^kid, _, _} -> true
_ -> false
end)
if node() in nodes do
GenServer.call(
JOSEVirtualHSM,
{:encrypt_ecdh, payload, jwk, kid, enc_alg, enc_enc},
timeout
)
else
GenServer.call(
{JOSEVirtualHSM, Enum.random(nodes)},
{:encrypt_ecdh, payload, jwk, kid, enc_alg, enc_enc},
timeout
)
end
[] ->
{:error, %NoSuitableKeyFoundError{}}
end
end
def encrypt_ecdh(payload, jwk, enc_alg, enc_enc, timeout) do
    with {:ok, payload_str} <- Jason.encode(payload) do
encrypt_ecdh(payload_str, jwk, enc_alg, enc_enc, timeout)
end
end
@doc false
@spec register_public_key(node(), JOSEUtils.JWK.t()) :: any()
def register_public_key(node, jwk_pub) do
GenServer.cast(__MODULE__, {:register, node, jwk_pub})
end
# GenServer callbacks
@impl true
def init(opts) do
Process.flag(:trap_exit, true)
Process.flag(:sensitive, true)
:net_kernel.monitor_nodes(true)
:ets.new(@pub_keys_tab, [:named_table, read_concurrency: true])
jwk_priv_ets = :ets.new(nil, [:private, read_concurrency: true])
state =
opts
|> Map.put(:jwk_priv_ets, jwk_priv_ets)
|> Map.put(:worker_pids, %{})
load_keys(state)
{:ok, state}
end
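  # Hide the state (which references the private key table) from crash
  # reports, `:sys.get_status/1` and observer.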
@impl true
def format_status(_reason, [_pdict, _state]) do
nil
end
@impl true
def handle_call({:sign, kid, alg_or_algs_or_nil, payload}, from, state) do
case GenServer.start_link(Worker, []) do
{:ok, pid} ->
[{_kid, jwk_priv}] = :ets.lookup(state.jwk_priv_ets, kid)
state = %{state | worker_pids: Map.put(state.worker_pids, pid, from)}
GenServer.cast(pid, {:sign, from, jwk_priv, alg_or_algs_or_nil, payload})
{:noreply, state}
{:error, reason} ->
{:reply, {:error, %WorkerError{reason: reason}}, state}
end
end
def handle_call({:encrypt_ecdh, payload, jwk, kid, enc_alg, enc_enc}, from, state) do
case GenServer.start_link(Worker, []) do
{:ok, pid} ->
[{_kid, jwk_priv}] = :ets.lookup(state.jwk_priv_ets, kid)
state = %{state | worker_pids: Map.put(state.worker_pids, pid, from)}
GenServer.cast(pid, {:encrypt_ecdh, from, jwk_priv, jwk, enc_alg, enc_enc, payload})
{:noreply, state}
{:error, reason} ->
{:reply, {:error, %WorkerError{reason: reason}}, state}
end
end
def handle_call({:decrypt, jwe, kids, enc_alg, enc_enc}, from, state) do
case GenServer.start_link(Worker, []) do
{:ok, pid} ->
jwks_priv =
for kid <- kids,
do: :ets.lookup(state.jwk_priv_ets, kid) |> List.first() |> elem(1)
state = %{state | worker_pids: Map.put(state.worker_pids, pid, from)}
GenServer.cast(pid, {:decrypt, from, jwks_priv, jwe, enc_alg, enc_enc})
{:noreply, state}
{:error, reason} ->
{:reply, {:error, %WorkerError{reason: reason}}, state}
end
end
@impl true
def handle_cast({:register, node, jwk_pub}, state) do
do_register_public_key(jwk_pub, node)
Logger.info("#{__MODULE__}: registered new key `#{jwk_pub["kid"]}` from `#{node}`")
{:noreply, state}
end
@impl true
def handle_info({:nodeup, from_node}, state) do
case :rpc.call(from_node, __MODULE__, :public_keys, [[local_only: true]]) do
remote_jwk_pubs when is_list(remote_jwk_pubs) ->
for jwk_pub <- remote_jwk_pubs do
do_register_public_key(jwk_pub, from_node)
Logger.info("#{__MODULE__}: registered new key `#{jwk_pub["kid"]}` from `#{from_node}`")
end
_ ->
Logger.info("#{__MODULE__}: node `#{from_node}` joined, #{__MODULE__} not running on it")
end
{:noreply, state}
end
def handle_info({:nodedown, from_node}, state) do
@pub_keys_tab
|> :ets.tab2list()
|> Enum.each(fn
{kid, jwk_pub, nodes} ->
if from_node in nodes and node() not in nodes do
:ets.delete(@pub_keys_tab, kid)
Logger.info("#{__MODULE__}: deleted key `#{kid}` of disconnected `#{from_node}`")
else
:ets.insert(@pub_keys_tab, {kid, jwk_pub, nodes -- [from_node]})
end
_ ->
:ok
end)
{:noreply, state}
end
def handle_info({:EXIT, from_pid, reason}, state) do
{calling_process, worker_pids} = Map.pop(state.worker_pids, from_pid)
e =
case reason do
{reason, _stacktrace} ->
%WorkerError{reason: reason}
reason ->
%WorkerError{reason: reason}
end
if reason != :normal, do: GenServer.reply(calling_process, {:error, e})
{:noreply, Map.put(state, :worker_pids, worker_pids)}
end
defp do_register_public_key(jwk_pub, from_node) do
node_list =
case :ets.lookup(@pub_keys_tab, jwk_pub["kid"]) do
[{_kid, ^jwk_pub, node_list}] ->
node_list
|> MapSet.new()
|> MapSet.put(from_node)
|> MapSet.to_list()
[] ->
[from_node]
end
:ets.insert(@pub_keys_tab, {jwk_pub["kid"], jwk_pub, node_list})
end
defp load_keys(state) do
for key_conf <- state[:keys] || Application.get_env(:jose_virtual_hsm, :keys, []) do
jwk_priv = load_key(key_conf, state)
:ets.insert(state.jwk_priv_ets, {jwk_priv["kid"], jwk_priv})
jwk_pub = JOSEUtils.JWK.to_public(jwk_priv)
:ets.insert(@pub_keys_tab, {jwk_pub["kid"], jwk_pub, [node()]})
notify_new_key(jwk_pub)
end
end
@spec load_key(key_load_spec(), map()) :: JOSEUtils.JWK.t()
defp load_key({op, params}, state) do
load_key({op, params, %{}}, state)
end
defp load_key({:auto_gen, {:okp, :Ed448}, %{"use" => "enc"}}, _) do
raise "`:Ed448` cannot be used for encryption (use `:X448` instead)"
end
defp load_key({:auto_gen, {:okp, :Ed25519}, %{"use" => "enc"}}, _) do
raise "`:Ed25519` cannot be used for encryption (use `:X25519` instead)"
end
defp load_key({:auto_gen, {:okp, :X448}, %{"use" => "sig"}}, _) do
raise "`:X448` cannot be used for signature (use `:Ed448` instead)"
end
defp load_key({:auto_gen, {:okp, :X25519}, %{"use" => "sig"}}, _) do
raise "`:X25519` cannot be used for signature (use `:Ed25519` instead)"
end
defp load_key({:auto_gen, key_params, key_fields}, _state) do
key_params
|> JOSE.JWK.generate_key()
|> JOSE.JWK.to_map()
|> elem(1)
|> thumbprint_jwk()
|> jwk_add_fields(key_fields)
end
defp load_key({:pem_file, path, key_fields}, state) do
jwk_priv =
path
|> JOSE.JWK.from_pem_file()
|> JOSE.JWK.to_map()
|> elem(1)
|> thumbprint_jwk()
|> jwk_add_fields(key_fields)
if state[:delete_on_load], do: File.rm!(path)
jwk_priv
end
defp load_key({:der_file, path, key_fields}, state) do
jwk_priv =
path
|> JOSE.JWK.from_der_file()
|> JOSE.JWK.to_map()
|> elem(1)
|> thumbprint_jwk()
|> jwk_add_fields(key_fields)
if state[:delete_on_load], do: File.rm!(path)
jwk_priv
end
defp load_key({:pem_env, env_var_name, key_fields}, state) do
jwk_priv =
env_var_name
|> System.fetch_env!()
|> JOSE.JWK.from_pem()
|> JOSE.JWK.to_map()
|> elem(1)
|> thumbprint_jwk()
|> jwk_add_fields(key_fields)
if state[:delete_on_load], do: System.delete_env(env_var_name)
jwk_priv
end
defp load_key({:der_env, env_var_name, key_fields}, state) do
jwk_priv =
env_var_name
|> System.fetch_env!()
|> Base.decode64!()
|> JOSE.JWK.from_der()
|> JOSE.JWK.to_map()
|> elem(1)
|> thumbprint_jwk()
|> jwk_add_fields(key_fields)
if state[:delete_on_load], do: System.delete_env(env_var_name)
jwk_priv
end
defp load_key({:map_env, env_var_name, _key_fields}, state) do
jwk_priv =
env_var_name
|> System.fetch_env!()
|> Jason.decode!()
|> thumbprint_jwk()
if state[:delete_on_load], do: System.delete_env(env_var_name)
jwk_priv
end
defp thumbprint_jwk(jwk_priv) do
thumbprint =
jwk_priv
|> JOSE.JWK.from_map()
|> JOSE.JWK.thumbprint()
Map.put(jwk_priv, "kid", thumbprint)
end
defp jwk_add_fields(jwk, fields), do: Map.merge(jwk, fields)
defp notify_new_key(jwk_pub) do
:rpc.multicall(Node.list(), __MODULE__, :register_public_key, [node(), jwk_pub])
end
end | lib/jose_virtual_hsm.ex | 0.72487 | 0.50415 | jose_virtual_hsm.ex | starcoder |
defmodule AWS.WorkSpaces do
@moduledoc """
Amazon WorkSpaces Service
Amazon WorkSpaces enables you to provision virtual, cloud-based Microsoft
Windows and Amazon Linux desktops for your users.
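
  An illustrative call; the `client` struct and the exact return shape depend on
  your `AWS.Client` configuration:

      {:ok, result, _http_response} = AWS.WorkSpaces.describe_workspaces(client, %{})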
"""
@doc """
Associates the specified IP access control group with the specified
directory.
"""
def associate_ip_groups(client, input, options \\ []) do
request(client, "AssociateIpGroups", input, options)
end
@doc """
Adds one or more rules to the specified IP access control group.
This action gives users permission to access their WorkSpaces from the CIDR
address ranges specified in the rules.
"""
def authorize_ip_rules(client, input, options \\ []) do
request(client, "AuthorizeIpRules", input, options)
end
@doc """
Copies the specified image from the specified Region to the current Region.
"""
def copy_workspace_image(client, input, options \\ []) do
request(client, "CopyWorkspaceImage", input, options)
end
@doc """
Creates an IP access control group.
An IP access control group provides you with the ability to control the IP
addresses from which users are allowed to access their WorkSpaces. To
specify the CIDR address ranges, add rules to your IP access control group
and then associate the group with your directory. You can add rules when
you create the group or at any time using `AuthorizeIpRules`.
There is a default IP access control group associated with your directory.
If you don't associate an IP access control group with your directory, the
default group is used. The default group includes a default rule that
allows users to access their WorkSpaces from anywhere. You cannot modify
the default IP access control group for your directory.
"""
def create_ip_group(client, input, options \\ []) do
request(client, "CreateIpGroup", input, options)
end
@doc """
Creates the specified tags for the specified WorkSpaces resource.
"""
def create_tags(client, input, options \\ []) do
request(client, "CreateTags", input, options)
end
@doc """
Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are
created.
"""
def create_workspaces(client, input, options \\ []) do
request(client, "CreateWorkspaces", input, options)
end
@doc """
Deletes the specified IP access control group.
You cannot delete an IP access control group that is associated with a
directory.
"""
def delete_ip_group(client, input, options \\ []) do
request(client, "DeleteIpGroup", input, options)
end
@doc """
Deletes the specified tags from the specified WorkSpaces resource.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Deletes the specified image from your account. To delete an image, you must
first delete any bundles that are associated with the image and unshare the
image if it is shared with other accounts.
"""
def delete_workspace_image(client, input, options \\ []) do
request(client, "DeleteWorkspaceImage", input, options)
end
@doc """
Deregisters the specified directory. This operation is asynchronous and
returns before the WorkSpace directory is deregistered. If any WorkSpaces
are registered to this directory, you must remove them before you can
deregister the directory.
"""
def deregister_workspace_directory(client, input, options \\ []) do
request(client, "DeregisterWorkspaceDirectory", input, options)
end
@doc """
Retrieves a list that describes the configuration of Bring Your Own License
(BYOL) for the specified account.
"""
def describe_account(client, input, options \\ []) do
request(client, "DescribeAccount", input, options)
end
@doc """
Retrieves a list that describes modifications to the configuration of Bring
Your Own License (BYOL) for the specified account.
"""
def describe_account_modifications(client, input, options \\ []) do
request(client, "DescribeAccountModifications", input, options)
end
@doc """
Retrieves a list that describes one or more specified Amazon WorkSpaces
clients.
"""
def describe_client_properties(client, input, options \\ []) do
request(client, "DescribeClientProperties", input, options)
end
@doc """
Describes one or more of your IP access control groups.
"""
def describe_ip_groups(client, input, options \\ []) do
request(client, "DescribeIpGroups", input, options)
end
@doc """
Describes the specified tags for the specified WorkSpaces resource.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Retrieves a list that describes the available WorkSpace bundles.
You can filter the results using either bundle ID or owner, but not both.
"""
def describe_workspace_bundles(client, input, options \\ []) do
request(client, "DescribeWorkspaceBundles", input, options)
end
@doc """
Describes the available directories that are registered with Amazon
WorkSpaces.
"""
def describe_workspace_directories(client, input, options \\ []) do
request(client, "DescribeWorkspaceDirectories", input, options)
end
@doc """
Describes the permissions that the owner of an image has granted to other
AWS accounts for an image.
"""
def describe_workspace_image_permissions(client, input, options \\ []) do
request(client, "DescribeWorkspaceImagePermissions", input, options)
end
@doc """
Retrieves a list that describes one or more specified images, if the image
identifiers are provided. Otherwise, all images in the account are
described.
"""
def describe_workspace_images(client, input, options \\ []) do
request(client, "DescribeWorkspaceImages", input, options)
end
@doc """
Describes the snapshots for the specified WorkSpace.
"""
def describe_workspace_snapshots(client, input, options \\ []) do
request(client, "DescribeWorkspaceSnapshots", input, options)
end
@doc """
Describes the specified WorkSpaces.
You can filter the results by using the bundle identifier, directory
identifier, or owner, but you can specify only one filter at a time.
"""
def describe_workspaces(client, input, options \\ []) do
request(client, "DescribeWorkspaces", input, options)
end
@doc """
Describes the connection status of the specified WorkSpaces.
"""
def describe_workspaces_connection_status(client, input, options \\ []) do
request(client, "DescribeWorkspacesConnectionStatus", input, options)
end
@doc """
Disassociates the specified IP access control group from the specified
directory.
"""
def disassociate_ip_groups(client, input, options \\ []) do
request(client, "DisassociateIpGroups", input, options)
end
@doc """
Imports the specified Windows 7 or Windows 10 Bring Your Own License (BYOL)
image into Amazon WorkSpaces. The image must be an already licensed EC2
image that is in your AWS account, and you must own the image.
"""
def import_workspace_image(client, input, options \\ []) do
request(client, "ImportWorkspaceImage", input, options)
end
@doc """
Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that
you can use for the network management interface when you enable Bring Your
Own License (BYOL).
The management network interface is connected to a secure Amazon WorkSpaces
management network. It is used for interactive streaming of the WorkSpace
desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to
manage the WorkSpace.
"""
def list_available_management_cidr_ranges(client, input, options \\ []) do
request(client, "ListAvailableManagementCidrRanges", input, options)
end
@doc """
Migrates a WorkSpace from one operating system or bundle type to another,
while retaining the data on the user volume.
The migration process recreates the WorkSpace by using a new root volume
from the target bundle image and the user volume from the last available
snapshot of the original WorkSpace. During migration, the original
`D:\Users\%USERNAME%` user profile folder is renamed to
`D:\Users\%USERNAME%MMddyyTHHmmss%.NotMigrated`. A new
`D:\Users\%USERNAME%\` folder is generated by the new OS. Certain files in
the old user profile are moved to the new user profile.
For available migration scenarios, details about what happens during
migration, and best practices, see [Migrate a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/migrate-workspaces.html).
"""
def migrate_workspace(client, input, options \\ []) do
request(client, "MigrateWorkspace", input, options)
end
@doc """
Modifies the configuration of Bring Your Own License (BYOL) for the
specified account.
"""
def modify_account(client, input, options \\ []) do
request(client, "ModifyAccount", input, options)
end
@doc """
Modifies the properties of the specified Amazon WorkSpaces clients.
"""
def modify_client_properties(client, input, options \\ []) do
request(client, "ModifyClientProperties", input, options)
end
@doc """
Modifies the self-service WorkSpace management capabilities for your users.
For more information, see [Enable Self-Service WorkSpace Management
Capabilities for Your
Users](https://docs.aws.amazon.com/workspaces/latest/adminguide/enable-user-self-service-workspace-management.html).
"""
def modify_selfservice_permissions(client, input, options \\ []) do
request(client, "ModifySelfservicePermissions", input, options)
end
@doc """
Specifies which devices and operating systems users can use to access their
WorkSpaces. For more information, see [ Control Device
Access](https://docs.aws.amazon.com/workspaces/latest/adminguide/update-directory-details.html#control-device-access).
"""
def modify_workspace_access_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceAccessProperties", input, options)
end
@doc """
Modifies the default properties used to create WorkSpaces.
"""
def modify_workspace_creation_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceCreationProperties", input, options)
end
@doc """
Modifies the specified WorkSpace properties. For important information
about how to modify the size of the root and user volumes, see [ Modify a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html).
"""
def modify_workspace_properties(client, input, options \\ []) do
request(client, "ModifyWorkspaceProperties", input, options)
end
@doc """
Sets the state of the specified WorkSpace.
To maintain a WorkSpace without being interrupted, set the WorkSpace state
to `ADMIN_MAINTENANCE`. WorkSpaces in this state do not respond to requests
to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this
state is not stopped. Users cannot log into a WorkSpace in the
`ADMIN_MAINTENANCE` state.
"""
def modify_workspace_state(client, input, options \\ []) do
request(client, "ModifyWorkspaceState", input, options)
end
@doc """
Reboots the specified WorkSpaces.
You cannot reboot a WorkSpace unless its state is `AVAILABLE` or
`UNHEALTHY`.
This operation is asynchronous and returns before the WorkSpaces have
rebooted.
"""
def reboot_workspaces(client, input, options \\ []) do
request(client, "RebootWorkspaces", input, options)
end
@doc """
Rebuilds the specified WorkSpace.
You cannot rebuild a WorkSpace unless its state is `AVAILABLE`, `ERROR`,
`UNHEALTHY`, `STOPPED`, or `REBOOTING`.
Rebuilding a WorkSpace is a potentially destructive action that can result
in the loss of data. For more information, see [Rebuild a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html).
This operation is asynchronous and returns before the WorkSpaces have been
completely rebuilt.
"""
def rebuild_workspaces(client, input, options \\ []) do
request(client, "RebuildWorkspaces", input, options)
end
@doc """
Registers the specified directory. This operation is asynchronous and
returns before the WorkSpace directory is registered. If this is the first
time you are registering a directory, you will need to create the
workspaces_DefaultRole role before you can register a directory. For more
information, see [ Creating the workspaces_DefaultRole
Role](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-access-control.html#create-default-role).
"""
def register_workspace_directory(client, input, options \\ []) do
request(client, "RegisterWorkspaceDirectory", input, options)
end
@doc """
Restores the specified WorkSpace to its last known healthy state.
You cannot restore a WorkSpace unless its state is ` AVAILABLE`, `ERROR`,
`UNHEALTHY`, or `STOPPED`.
Restoring a WorkSpace is a potentially destructive action that can result
in the loss of data. For more information, see [Restore a
WorkSpace](https://docs.aws.amazon.com/workspaces/latest/adminguide/restore-workspace.html).
This operation is asynchronous and returns before the WorkSpace is
completely restored.
"""
def restore_workspace(client, input, options \\ []) do
request(client, "RestoreWorkspace", input, options)
end
@doc """
Removes one or more rules from the specified IP access control group.
"""
def revoke_ip_rules(client, input, options \\ []) do
request(client, "RevokeIpRules", input, options)
end
@doc """
Starts the specified WorkSpaces.
You cannot start a WorkSpace unless it has a running mode of `AutoStop` and
a state of `STOPPED`.
"""
def start_workspaces(client, input, options \\ []) do
request(client, "StartWorkspaces", input, options)
end
@doc """
Stops the specified WorkSpaces.
You cannot stop a WorkSpace unless it has a running mode of `AutoStop` and
a state of `AVAILABLE`, `IMPAIRED`, `UNHEALTHY`, or `ERROR`.
"""
def stop_workspaces(client, input, options \\ []) do
request(client, "StopWorkspaces", input, options)
end
@doc """
Terminates the specified WorkSpaces.
Terminating a WorkSpace is a permanent action and cannot be undone. The
user's data is destroyed. If you need to archive any user data, contact
Amazon Web Services before terminating the WorkSpace.
You can terminate a WorkSpace that is in any state except `SUSPENDED`.
This operation is asynchronous and returns before the WorkSpaces have been
completely terminated.
"""
def terminate_workspaces(client, input, options \\ []) do
request(client, "TerminateWorkspaces", input, options)
end
@doc """
Replaces the current rules of the specified IP access control group with
the specified rules.
"""
def update_rules_of_ip_group(client, input, options \\ []) do
request(client, "UpdateRulesOfIpGroup", input, options)
end
@doc """
Shares or unshares an image with one account by specifying whether that
account has permission to copy the image. If the copy image permission is
granted, the image is shared with that account. If the copy image
permission is revoked, the image is unshared with the account.
<note> <ul> <li> To delete an image that has been shared, you must unshare
the image before you delete it.
</li> <li> Sharing Bring Your Own License (BYOL) images across AWS accounts
isn't supported at this time in the AWS GovCloud (US-West) Region. To share
BYOL images across accounts in the AWS GovCloud (US-West) Region, contact
AWS Support.
</li> </ul> </note>
"""
def update_workspace_image_permission(client, input, options \\ []) do
request(client, "UpdateWorkspaceImagePermission", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "workspaces"}
host = build_host("workspaces", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "WorkspacesService.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end | lib/aws/work_spaces.ex | 0.875095 | 0.439386 | work_spaces.ex | starcoder |
defmodule SpandexPhoenix do
@moduledoc """
A Plug wrapper for use in a Plug.Router or Phoenix.Endpoint to trace the entire request with Spandex.
> NOTE: If you want to `use` this in combination with `Plug.ErrorHandler` or
similar "wrapper" plugs, this one should be last so that it traces the
effects of the other wrappers.
Phoenix integration:
```elixir
defmodule MyAppWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :my_app
use SpandexPhoenix
# ...
end
```
Plug integration:
```elixir
defmodule MyApp.Router do
use Plug.Router
use SpandexPhoenix
# ...
end
```
## Options for `use` Macro
* `:filter_traces` (arity-1 function reference)
A function that takes a `Plug.Conn` and returns `true` for requests to be
traced. For example, to only trace certain HTTP methods, you could do
something like:
```elixir
defmodule MyAppWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :my_app
use SpandexPhoenix, filter_traces: &__MODULE__.filter_traces/1
def filter_traces(conn) do
conn.method in ~w(DELETE GET POST PUT)
end
end
```
> NOTE: Local references to functions in the module being defined (e.g.
`&function/1`) will not work because the module will not be compiled yet
when the function is being referenced, so the function does not exist.
Referencing the local function using `&__MODULE__.function/1` will work,
however.
Default: (a private function that always returns `true`)
* `:span_name` (`String`)
The name to be used for the top level span.
Default: `"request"`
* `:tracer` (`Atom`)
The tracing module to be used for the trace.
Default: `Application.get_env(:spandex_phoenix, :tracer)`
* `:customize_metadata` (arity-1 function reference)
A function that takes the `Plug.Conn` for the current request and returns
the desired span options to apply to the top-level span in the trace (as a
`Keyword`). The `Plug.Conn` is normally evaluated just before the response
is sent to the client, to ensure that the most-accurate metadata can be
collected. In cases where there is an unhandled error, it may only
represent the initial request without any response information.
For example, if you want a particular path parameter to show its value in
the `resource` instead of its name, you should do something like:
```elixir
defmodule MyApp.Tracer do
use Spandex.Tracer, otp_app: :my_app
def customize_metadata(conn) do
name = conn.path_params["name"] || ""
conn
|> SpandexPhoenix.default_metadata()
|> Keyword.update(:resource, "", &String.replace(&1, ":name", name))
end
end
defmodule MyAppWeb.Endpoint do
use Phoenix.Endpoint, otp_app: :my_app
use SpandexPhoenix, customize_metadata: &MyApp.Tracer.customize_metadata/1
plug Router
end
```
> NOTE: Local references to functions in the module being defined (e.g.
`&function/1`) will not work because the module will not be compiled yet
when the function is being referenced, so the function does not exist.
Referencing the local function using `&__MODULE__.function/1` will work,
however.
Default: `&SpandexPhoenix.default_metadata/1`
"""
alias SpandexPhoenix.Plug.{
AddContext,
FinishTrace,
StartTrace
}
defmacro __using__(opts) do
tracer = Keyword.get(opts, :tracer, Application.get_env(:spandex_phoenix, :tracer))
if is_nil(tracer), do: raise("You must configure a :tracer for :spandex_phoenix")
opts = Keyword.put(opts, :tracer, tracer)
start_opts = Keyword.take(opts, [:filter_traces, :span_name, :tracer])
context_opts = Keyword.take(opts, [:customize_metadata, :tracer])
finish_opts = Keyword.take(opts, [:tracer])
quote location: :keep,
bind_quoted: [
use_opts: opts,
tracer: tracer,
start_opts: start_opts,
context_opts: context_opts,
finish_opts: finish_opts
] do
@before_compile SpandexPhoenix
@use_opts use_opts
@tracer tracer
@start_opts StartTrace.init(start_opts)
@context_opts AddContext.init(context_opts)
@finish_opts FinishTrace.init(finish_opts)
end
end
defmacro __before_compile__(_env) do
quote location: :keep do
defoverridable call: 2
def call(conn, opts) do
try do
conn
|> StartTrace.call(@start_opts)
|> Plug.Conn.register_before_send(&AddContext.call(&1, @context_opts))
|> super(opts)
rescue
error in Plug.Conn.WrapperError ->
SpandexPhoenix.handle_errors(error, @tracer, @context_opts, @finish_opts)
catch
kind, reason ->
error = %{conn: conn, kind: kind, reason: reason, stack: __STACKTRACE__}
SpandexPhoenix.handle_errors(error, @tracer, @context_opts, @finish_opts)
else
conn ->
FinishTrace.call(conn, @finish_opts)
end
end
end
end
@doc """
"""
@spec default_metadata(Plug.Conn.t()) :: Keyword.t()
def default_metadata(conn) do
conn = Plug.Conn.fetch_query_params(conn)
route = route_name(conn)
user_agent =
conn
|> Plug.Conn.get_req_header("user-agent")
|> List.first()
method = String.upcase(conn.method)
[
http: [
method: method,
query_string: conn.query_string,
status_code: conn.status,
url: URI.decode(conn.request_path),
user_agent: user_agent
],
resource: method <> " " <> route,
type: :web
]
end
@spec trace_all_requests(Plug.Conn.t()) :: true
@doc "Default implementation of the filter_traces function"
def trace_all_requests(_conn), do: true
@already_sent {:plug_conn, :sent}
@doc false
def handle_errors(error, tracer, context_opts, finish_opts) do
%{conn: conn, kind: kind, reason: reason, stack: stack} = error
# If the response has already been sent, `AddContext` has already been called.
# If not, we need to call it here to set the request metadata.
conn =
receive do
@already_sent ->
# Make sure we put this back in the mailbox for others.
send(self(), @already_sent)
conn
after
0 ->
AddContext.call(conn, context_opts)
end
exception =
case kind do
:error -> Exception.normalize(kind, reason, stack)
_ -> %RuntimeError{message: Exception.format_banner(kind, reason)}
end
mark_span_as_error(tracer, exception, stack)
FinishTrace.call(conn, finish_opts)
:erlang.raise(kind, reason, stack)
end
@doc false
def mark_span_as_error(tracer, %{__struct__: Phoenix.Router.NoRouteError, __exception__: true}, _stack) do
tracer.update_span(resource: "Not Found")
end
def mark_span_as_error(_tracer, %{__struct__: Plug.Parsers.UnsupportedMediaTypeError, __exception__: true}, _stack),
do: nil
def mark_span_as_error(tracer, exception, stack) do
tracer.span_error(exception, stack)
tracer.update_span(error: [error?: true])
end
# Private Helpers
# Set by Plug.Router
defp route_name(%Plug.Conn{private: %{plug_route: {route, _fn}}}), do: route
# Phoenix doesn't set the plug_route for us, so we have to figure it out ourselves
defp route_name(%Plug.Conn{path_params: path_params, path_info: path_info}) do
"/" <> Enum.map_join(path_info, "/", &replace_path_param_with_name(path_params, &1))
end
defp replace_path_param_with_name(path_params, path_component) do
decoded_component = URI.decode(path_component)
Enum.find_value(path_params, decoded_component, fn
{param_name, ^decoded_component} -> ":#{param_name}"
_ -> nil
end)
end
end | lib/spandex_phoenix.ex | 0.856152 | 0.782912 | spandex_phoenix.ex | starcoder |
defmodule Multipipe do
@moduledoc """
Macros to augment the default pipe, allowing multiple parameter pipes and
pipes into arbitrary inputs.
Our first example of using multiple parameter pipes sets the first parameter
as "Hello", the second as "World", and pipes them into the string
concatenation function `Kernel.<>`.
iex> param(1, "Hello") |> param(2, "World")
...> |> useparams(Kernel.<>)
"HelloWorld"
The order of specifying the parameters doesn't matter:
iex> param(2, "World") |> param(1, "Hello")
...> |> useparams(Kernel.<>)
"HelloWorld"
The statement `param(i, value)` means "use `value` as parameter number `i`". The
syntax must be given as `param(i, value)` or, as we'll see below, `value |> param(i, _)`.
Once you start collecting parameters with `param` you must either continue
piping into further `param` statements to collect more parameters, or into a
`useparams` statement to use them.
If you want to use the output of a pipe (or any other value that can can be
piped) as a parameter, piping into a parameter statement is supported by using
an underscore:
iex> "olleH" |> String.reverse
...> |> param(1, _)
...> |> param(2, "World")
...> |> useparams(Kernel.<>)
"HelloWorld"
Partial parameters are also supported, as long as the other parameters are
supplied in the function call. This allows for piping into arbitrary inputs:
iex> param(1, "Hello") |> useparams(Kernel.<>("World"))
"HelloWorld"
iex> param(2, "Hello") |> useparams(Kernel.<>("World"))
"WorldHello"
"""
defp expand(x) do
x |> Macro.postwalk(fn(x) -> x |> Macro.expand(__ENV__) end)
end
@doc """
Collects parameters, which are applied with `useparams`.
The usage syntax is
`param(index, value)`
to create a new set of parameters with the given value for the given index, or
`param(params, index, value)` to take an existing collection of parameters
and set the given index to `value`.
It is intended to be used with the Elixir pipe, to allow multiple parameter
pipes in conjunction with `useparams`:
iex> param(1, "Hello") |> param(2, "World") |> useparams(Kernel.<>)
"HelloWorld"
To allow parameter collection to start in the middle of a pipeline, there is
`param(value, index, _)`
provided as a shorthand for `param(index, value)`. For instance:
iex> "olleH" |> String.reverse
...> |> param(1, _)
...> |> param(2, "World")
...> |> useparams(Kernel.<>)
"HelloWorld"
Parameters collected by `param` should always be terminated by piping them into
a `useparams` statement.
See the module docs for further usage examples.
"""
defmacro param(params \\ {:%{}, [], []}, index, value)
# If a value is piped into a param statement with an underscore, replace the
# underscore with the value.
defmacro param(value, index, {:_, _, _}) do
quote do
param(unquote(index), unquote(value))
end
end
# Otherwise, it's assumed the value piped into the statement is already a set
# of parameters, which is a map. In this case, add the new `index => value`
# to the map, deleting any value already associated to `index` if it exists.
defmacro param({:%{}, meta, list}, index, value) do
list = List.keydelete(list, index, 0)
quote do
unquote({:%{}, meta, list ++ [{index, value}]})
end
end
# For expanding the macro. Is there a way to avoid needing this?
defmacro param({:param, _, _} = x, y, z) do
quote do
param(unquote(x |> expand), unquote(y |> expand), unquote(z |> expand))
end
end
@doc """
Applies a set of parameters collected with `param` statements to a function.
The usage syntax is
`useparams(params, function_call)`
where `params` is a collection of parameters assembled by `param` statements,
and `function_call` is a (possibly partially applied) call to a function, that
is, anything you could normally pipe into with the default Elixir pipe `|>`.
It is intended to be used with the Elixir pipe, for terminating a series of
`param` statements.
See the docs for `Multipipe.param/3` and the module docs for usage examples.
"""
# If the list of parameters is empty, return the function statement.
defmacro useparams({:%{}, _, []}, func) do
func
end
# Otherwise, find the lowest index parameter and add it to the function call.
defmacro useparams({:%{}, meta, list}, partial) do
{index, value} = list |> Enum.min
partial = Macro.pipe(value, partial, index - 1)
quote do
useparams(unquote({:%{}, meta, list -- [{index, value}]}), unquote(partial))
end
end
# Expand the param macros so we can access the parameters as maps instead of
# nested ASTs.
defmacro useparams({:param, _, _} = x, partial) do
quote do
useparams(unquote(x |> expand), unquote(partial))
end
end
@doc """
Pipe the input value into a specified parameter of a function call.
Example usage:
# function call: String.contains?("foobar", "bar")
iex> "bar" |> as_param(2, String.contains?("foobar"))
true
"""
defmacro as_param(value, index, func) do
quote do
unquote(Macro.pipe(value, func, index - 1))
end
end
end | lib/multipipe.ex | 0.89019 | 0.759292 | multipipe.ex | starcoder |
defmodule Rox.Batch do
@moduledoc """
Module for performing atomic write operations on a database.
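A minimal usage sketch (assuming `db` is an open `Rox.DB` handle):

```elixir
alias Rox.Batch

Batch.new()
|> Batch.put("key_a", "value_a")
|> Batch.delete("key_b")
|> Batch.write(db)
```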
"""
alias Rox.{DB, ColumnFamily, Utils, Native}
alias __MODULE__
@typedoc "A reference to a batch operation"
@type t :: %__MODULE__{operations: [op]}
defstruct operations: []
@typep op ::
{:put, {key :: binary, value :: binary}}
| {:put_cf, {ColumnFamily.t(), key :: binary, value :: binary}}
| {:delete, key :: binary}
| {:delete_cf, {ColumnFamily.t(), key :: binary}}
@doc """
Creates a new `Batch` operation
"""
@spec new :: t
def new do
%Batch{}
end
@doc """
Returns a new `Batch` with a put operation scheduled.
"""
@spec put(t, Rox.key(), Rox.value(), Rox.write_options()) :: t
def put(%Batch{operations: ops} = batch, key, value, write_opts \\ []) when is_binary(key) do
%{batch | operations: [{:put, {key, Utils.encode(value, write_opts)}} | ops]}
end
@doc """
Returns a new `Batch` with a put operation scheduled for the `column_family`.
"""
@spec put_cf(t, ColumnFamily.t(), Rox.key(), Rox.value(), Rox.write_options()) :: t
def put_cf(%Batch{operations: ops} = batch, cf, key, value, write_opts \\ [])
when is_binary(key) do
%{batch | operations: [{:put_cf, {cf, key, Utils.encode(value, write_opts)}} | ops]}
end
@doc """
Schedules a delete operation in the `batch`.
"""
@spec delete(t, Rox.key()) :: t
def delete(%Batch{operations: ops} = batch, key) when is_binary(key) do
%{batch | operations: [{:delete, key} | ops]}
end
@doc """
Schedules a delete operation in the `batch` for `key` in `column_family`.
"""
@spec delete(t, ColumnFamily.t(), Rox.key()) :: t
def delete(%Batch{operations: ops} = batch, cf, key) when is_binary(key) do
%{batch | operations: [{:delete_cf, {cf, key}} | ops]}
end
@doc """
Atomically commits the operations in the `batch` to the `db`.
"""
@spec write(t, DB.t()) :: :ok | {:error, reason :: any}
def write(%Batch{operations: ops}, %DB{resource: db}) do
ops |> Enum.reverse() |> Native.batch_write(db)
end
@doc """
Merges a list of `Batch.t` into a single `Batch.t`.
"""
@spec merge([t]) :: t
def merge(batches) do
batches
|> Enum.reduce(Batch.new(), fn %Batch{operations: ops}, %Batch{operations: merge_ops} = acc ->
%{acc | operations: Enum.concat(ops, merge_ops)}
end)
end
end | lib/rox/batch.ex | 0.913941 | 0.447219 | batch.ex | starcoder |
defmodule Mux.Data.Metrics do
@moduledoc """
This module provides functions that interact with the `metrics` endpoints, which includes a bulk
of the data product's statistical data.
Note, these API documentation links may break periodically as we update documentation titles.
- [Breakdowns](https://docs.mux.com/api-reference/data#operation/list-breakdown-values)
- [Comparison](https://docs.mux.com/api-reference/data#operation/list-all-metric-values)
- [Insights](https://docs.mux.com/api-reference/data#operation/list-insights)
- [Overall](https://docs.mux.com/api-reference/data#operation/get-overall-values)
- [Timeseries](https://docs.mux.com/api-reference/data#operation/get-metric-timeseries-data)
"""
alias Mux.{Base, Fixtures}
@doc """
List the breakdown values for a specific metric.
Returns `{:ok, breakdowns, raw_env}`.
## Examples
iex> client = Mux.client("my_token_id", "my_token_secret")
iex> {:ok, breakdowns, _env} = Mux.Data.Metrics.breakdown(client, "video_startup_time", "browser")
iex> breakdowns
#{inspect(Fixtures.breakdown()["data"])}
iex> client = Mux.client("my_token_id", "my_token_secret")
iex> {:ok, breakdowns, _env} = Mux.Data.Metrics.breakdown(client, "video_startup_time", "browser", measurement: "median", timeframe: ["6:hours"])
iex> breakdowns
#{inspect(Fixtures.breakdown()["data"])}
"""
def breakdown(client, metric, group_by, params \\ []) do
params = Keyword.merge([group_by: group_by], params)
Base.get(client, build_base_path(metric) <> "/breakdown", query: params)
end
@doc """
List all of the values across every breakdown for a specific breakdown value.
Returns `{:ok, comparisons, raw_env}`.
## Examples
iex> client = Mux.client("my_token_id", "my_token_secret")
iex> {:ok, comparison, _env} = Mux.Data.Metrics.comparison(client, "browser", "Safari")
iex> comparison
#{inspect(Fixtures.comparison()["data"])}
"""
def comparison(client, dimension, value, params \\ []) do
params = Keyword.merge([dimension: dimension, value: value], params)
Base.get(client, build_base_path() <> "/comparison", query: params)
end
@doc """
Returns a list of insights for a metric. These are the worst performing values across all breakdowns
sorted by how much they negatively impact a specific metric.
Returns `{:ok, insights, raw_env}`.
## Examples
iex> client = Mux.client("my_token_id", "my_token_secret")
iex> {:ok, insights, _env} = Mux.Data.Metrics.insights(client, "video_startup_time")
iex> insights
#{inspect(Fixtures.insights()["data"])}
"""
def insights(client, metric, params \\ []) do
Base.get(client, build_base_path(metric) <> "/insights", query: params)
end
@doc """
Returns the overall value for a specific metric, as well as the total view count, watch time, and
the Mux Global metric value for the metric.
Returns `{:ok, overall_values, raw_env}`.
## Examples
iex> client = Mux.client("my_token_id", "my_token_secret")
iex> {:ok, insights, _env} = Mux.Data.Metrics.overall(client, "video_startup_time")
iex> insights
#{inspect(Fixtures.overall()["data"])}
"""
def overall(client, metric, params \\ []) do
Base.get(client, build_base_path(metric) <> "/overall", query: params)
end
@doc """
Returns time series data for a given metric.
Returns `{:ok, timeseries, raw_env}`.
## Examples
iex> client = Mux.client("my_token_id", "my_token_secret")
iex> {:ok, timeseries, _env} = Mux.Data.Metrics.timeseries(client, "video_startup_time")
iex> timeseries
#{inspect(Fixtures.timeseries()["data"])}
"""
def timeseries(client, metric, params \\ []) do
Base.get(client, build_base_path(metric) <> "/timeseries", query: params)
end
defp build_base_path(), do: "/data/v1/metrics"
defp build_base_path(metric), do: build_base_path() <> "/#{metric}"
end | lib/mux/data/metrics.ex | 0.894797 | 0.594051 | metrics.ex | starcoder |
defmodule Integer do
@moduledoc """
Functions for working with integers.
"""
import Bitwise
@doc """
Determines if `integer` is odd.
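## Examples
iex> require Integer
iex> Integer.is_odd(5)
true
iex> Integer.is_odd(6)
false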
"""
defguard is_odd(integer) when is_integer(integer) and (integer &&& 1) == 1
@doc """
Determines if an `integer` is even.
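## Examples
iex> require Integer
iex> Integer.is_even(10)
true
iex> Integer.is_even(5)
false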
"""
defguard is_even(integer) when is_integer(integer) and (integer &&& 1) == 0
@doc """
Computes `base` raised to the power of `exponent`.
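## Examples
iex> Integer.pow(2, 10)
1024
iex> Integer.pow(10, 0)
1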
"""
@doc since: "1.12.0"
@spec pow(integer, non_neg_integer) :: integer
def pow(base, exponent) when is_integer(base) and is_integer(exponent) do
if exponent < 0, do: :erlang.error(:badarith, [base, exponent])
guarded_pow(base, exponent)
end
# https://en.wikipedia.org/wiki/Exponentiation_by_squaring
defp guarded_pow(_, 0), do: 1
defp guarded_pow(b, 1), do: b
defp guarded_pow(b, e) when (e &&& 1) == 0, do: guarded_pow(b * b, e >>> 1)
defp guarded_pow(b, e), do: b * guarded_pow(b * b, e >>> 1)
@doc """
Computes the modulo remainder of an integer division.
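This function uses floored division, which means that the result will always
have the sign of the `divisor`.
## Examples
iex> Integer.mod(5, 2)
1
iex> Integer.mod(-5, 2)
1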
"""
@doc since: "1.4.0"
@spec mod(integer, neg_integer | pos_integer) :: integer
def mod(dividend, divisor) do
remainder = rem(dividend, divisor)
if remainder * divisor < 0 do
remainder + divisor
else
remainder
end
end
@doc """
Performs a floored integer division.
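Raises an `ArithmeticError` if the `divisor` is `0`.
## Examples
iex> Integer.floor_div(5, 2)
2
iex> Integer.floor_div(-5, 2)
-3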
"""
@doc since: "1.4.0"
@spec floor_div(integer, neg_integer | pos_integer) :: integer
def floor_div(dividend, divisor) do
if dividend * divisor < 0 and rem(dividend, divisor) != 0 do
div(dividend, divisor) - 1
else
div(dividend, divisor)
end
end
@doc """
Returns the ordered digits for the given `integer`.
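An optional `base` can be given (default `10`) as the radix for the returned
digits.
## Examples
iex> Integer.digits(123)
[1, 2, 3]
iex> Integer.digits(170, 2)
[1, 0, 1, 0, 1, 0, 1, 0]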
"""
@spec digits(integer, pos_integer) :: [integer, ...]
def digits(integer, base \\ 10)
when is_integer(integer) and is_integer(base) and base >= 2 do
do_digits(integer, base, [])
end
defp do_digits(integer, base, acc) when abs(integer) < base, do: [integer | acc]
defp do_digits(integer, base, acc),
do: do_digits(div(integer, base), base, [rem(integer, base) | acc])
@doc """
Returns the integer represented by the ordered `digits`.
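An optional `base` can be given (default `10`) as the radix of the `digits`.
## Examples
iex> Integer.undigits([1, 2, 3])
123
iex> Integer.undigits([1, 0, 1], 2)
5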
"""
@spec undigits([integer], pos_integer) :: integer
def undigits(digits, base \\ 10) when is_list(digits) and is_integer(base) and base >= 2 do
do_undigits(digits, base, 0)
end
defp do_undigits([], _base, acc), do: acc
defp do_undigits([digit | _], base, _) when is_integer(digit) and digit >= base,
do: raise(ArgumentError, "invalid digit #{digit} in base #{base}")
defp do_undigits([digit | tail], base, acc) when is_integer(digit),
do: do_undigits(tail, base, acc * base + digit)
@doc """
Parses a text representation of an integer.
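Returns `{integer, remainder_of_binary}` on success, or `:error` if the
binary does not start with a valid digit in the given `base`.
## Examples
iex> Integer.parse("34")
{34, ""}
iex> Integer.parse("34.5")
{34, ".5"}
iex> Integer.parse("three")
:error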
"""
@spec parse(binary, 2..36) :: {integer, binary} | :error
def parse(binary, base \\ 10)
def parse(_binary, base) when base not in 2..36 do
raise ArgumentError, "invalid base #{inspect(base)}"
end
def parse(binary, base) when is_binary(binary) do
case count_digits(binary, base) do
0 ->
:error
count ->
{digits, rem} = :erlang.split_binary(binary, count)
{:erlang.binary_to_integer(digits, base), rem}
end
end
defp count_digits(<<sign, rest::bits>>, base) when sign in '+-' do
case count_digits_nosign(rest, base, 1) do
1 -> 0
count -> count
end
end
defp count_digits(<<rest::bits>>, base) do
count_digits_nosign(rest, base, 0)
end
digits = [{?0..?9, -?0}, {?A..?Z, 10 - ?A}, {?a..?z, 10 - ?a}]
for {chars, diff} <- digits,
char <- chars do
digit = char + diff
defp count_digits_nosign(<<unquote(char), rest::bits>>, base, count)
when base > unquote(digit) do
count_digits_nosign(rest, base, count + 1)
end
end
defp count_digits_nosign(<<_::bits>>, _, count), do: count
# TODO: Remove Integer.to_string/1 once the minimum supported version is
# Erlang/OTP 22, since it is covered by the now BIF Integer.to_string/2.
# Please reapply the corresponding commit.
@doc """
Returns a binary which corresponds to the text representation
of `integer`.
"""
@spec to_string(integer) :: String.t()
def to_string(integer) do
:erlang.integer_to_binary(integer)
end
@doc """
Returns a binary which corresponds to the text representation
of `integer` in the given `base`.
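`base` must be an integer between `2` and `36`.
## Examples
iex> Integer.to_string(100, 16)
"64"
iex> Integer.to_string(-100, 16)
"-64"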
"""
@spec to_string(integer, 2..36) :: String.t()
def to_string(integer, base) do
:erlang.integer_to_binary(integer, base)
end
# TODO: Remove Integer.to_charlist/1 once the minimum supported version is
# Erlang/OTP 22, since it is covered by the now BIF Integer.to_charlist/2.
# Please reapply the corresponding commit.
@doc """
Returns a charlist which corresponds to the text representation of the given `integer`.
"""
@spec to_charlist(integer) :: charlist
def to_charlist(integer) do
:erlang.integer_to_list(integer)
end
@doc """
Returns a charlist which corresponds to the text representation of `integer` in the given `base`.
"""
@spec to_charlist(integer, 2..36) :: charlist
def to_charlist(integer, base) do
:erlang.integer_to_list(integer, base)
end
@doc """
Returns the greatest common divisor of the two given integers.
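The greatest common divisor is the largest positive integer that divides both
`integer1` and `integer2` without remainder; by this definition, `gcd(0, 0)`
returns `0`.
## Examples
iex> Integer.gcd(2, 3)
1
iex> Integer.gcd(8, 12)
4
iex> Integer.gcd(0, 0)
0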
"""
@doc since: "1.5.0"
@spec gcd(integer, integer) :: non_neg_integer
def gcd(integer1, integer2) when is_integer(integer1) and is_integer(integer2) do
gcd_positive(abs(integer1), abs(integer2))
end
defp gcd_positive(0, integer2), do: integer2
defp gcd_positive(integer1, 0), do: integer1
defp gcd_positive(integer1, integer2), do: gcd_positive(integer2, rem(integer1, integer2))
@doc false
@deprecated "Use Integer.to_charlist/1 instead"
def to_char_list(integer), do: Integer.to_charlist(integer)
@doc false
@deprecated "Use Integer.to_charlist/2 instead"
def to_char_list(integer, base), do: Integer.to_charlist(integer, base)
end | samples/Elixir/integer.ex | 0.850282 | 0.477371 | integer.ex | starcoder |
defmodule Adventofcode.Day09MarbleMania do
use Adventofcode
alias Adventofcode.Circle
@enforce_keys [:last, :marbles, :players]
defstruct turn: 0, last: 0, marbles: nil, players: {}
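# Example from the Advent of Code 2018 day 9 puzzle statement (the input
# string format is the one consumed by `parse/1` below):
#
#   "10 players; last marble is worth 1618 points"
#   |> Adventofcode.Day09MarbleMania.winning_score()
#   #=> 8317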
def winning_score(input) do
input
|> new
|> play
end
def winning_score_times_hundred(input) do
input
|> new
|> Map.update(:last, nil, &(&1 * 100))
|> play
end
defp new(input) do
[player_count, last_marble] = parse(input)
players = 1..player_count |> Enum.map(fn _ -> 0 end) |> List.to_tuple()
marbles = Circle.new() |> Circle.insert_next(0)
%__MODULE__{last: last_marble, marbles: marbles, players: players}
end
defp play(%{turn: turn, last: turn} = state) do
state.players
|> Tuple.to_list()
|> Enum.max()
end
defp play(state) do
state
|> update_turn
|> update_marbles
|> play
end
defp update_turn(state) do
%{state | turn: posrem(state.turn + 1, state.last)}
end
defp update_marbles(%{turn: turn} = state) when rem(turn, 23) == 0 do
state
|> move_backward_seven_times
|> assign_score_to_player
|> remove_current_marble
end
defp update_marbles(state) do
move_forward_twice_and_insert_marble(state)
end
def move_backward_seven_times(state) do
marbles =
state.marbles
|> Circle.move_prev()
|> Circle.move_prev()
|> Circle.move_prev()
|> Circle.move_prev()
|> Circle.move_prev()
|> Circle.move_prev()
|> Circle.move_prev()
%{state | marbles: marbles}
end
def move_forward_twice_and_insert_marble(state) do
marbles = Circle.move_next(state.marbles)
%{state | marbles: Circle.insert_next(marbles, state.turn)}
end
def remove_current_marble(state) do
%{state | marbles: Circle.remove_current(state.marbles)}
end
def assign_score_to_player(state) do
score = state.turn + Circle.current(state.marbles)
players = update_elem(state.players, player_index(state), &(&1 + score))
%{state | players: players}
end
def player_index(state) do
rem(state.turn - 1, tuple_size(state.players))
end
defp parse(input) do
~r/\d+/
|> Regex.scan(input)
|> List.flatten()
|> Enum.map(&String.to_integer/1)
end
# 1-indexed modulo constraint
defp posrem(dividend, divisor) do
case rem(dividend - 1, divisor) + 1 do
num when num < 0 -> divisor - num
num -> num
end
end
def update_elem(tuple, index, fun) do
value = elem(tuple, index)
put_elem(tuple, index, fun.(value))
end
end | lib/day_09_marble_mania.ex | 0.721743 | 0.530297 | day_09_marble_mania.ex | starcoder |
defmodule EQRCode.ReedSolomon do
@moduledoc false
import Bitwise
@rs_block %{
# version => {error_code_len, data_code_len, remainder_len}
1 => {07, 019, 0},
2 => {10, 034, 7},
3 => {15, 055, 7},
4 => {20, 080, 7},
5 => {26, 108, 7},
6 => {18, 068, 7},
7 => {20, 078, 0}
}
@format_generator_polynomial 0b10100110111
@format_mask 0b101010000010010
@doc """
Returns generator polynomials in alpha exponent for given error code length.
Example:
iex> EQRCode.ReedSolomon.generator_polynomial(10)
[0, 251, 67, 46, 61, 118, 70, 64, 94, 32, 45]
"""
def generator_polynomial(error_code_len)
Stream.iterate({[0, 0], 1}, fn {e, i} ->
{rest, last} =
Stream.map(e, &rem(&1 + i, 255))
|> Enum.split(i)
rest =
Stream.zip(rest, tl(e))
|> Enum.map(fn {x, y} ->
(EQRCode.GaloisField.to_i(x) ^^^ EQRCode.GaloisField.to_i(y))
|> EQRCode.GaloisField.to_a()
end)
{[0] ++ rest ++ last, i + 1}
end)
|> Stream.take(32)
|> Enum.each(fn {e, i} ->
def generator_polynomial(unquote(i)), do: unquote(e)
end)
@doc """
Reed-Solomon encode.
Example:
iex> EQRCode.ReedSolomon.encode(EQRCode.Encode.encode("hello world!"))
<<64, 198, 134, 86, 198, 198, 242, 7, 118, 247, 38, 198, 66, 16,
236, 17, 236, 17, 236, 45, 99, 25, 84, 35, 114, 46>>
"""
@spec encode({integer, [0 | 1]}) :: [binary]
def encode({version, message}) do
{error_code_len, data_code_len, remainder_len} = @rs_block[version]
gen_poly = generator_polynomial(error_code_len)
data =
Stream.chunk_every(message, 8)
|> Stream.map(&String.to_integer(Enum.join(&1), 2))
|> Stream.chunk_every(data_code_len)
|> Stream.map(&{&1, polynomial_division(&1, gen_poly, data_code_len)})
|> Enum.unzip()
|> Tuple.to_list()
|> Enum.flat_map(&interleave/1)
|> :binary.list_to_bin()
<<data::binary, 0::size(remainder_len)>>
end
defp interleave(list) do
Enum.zip(list)
|> Enum.flat_map(&Tuple.to_list/1)
end
@doc """
Performs the polynomial division.
Example:
iex> EQRCode.ReedSolomon.polynomial_division([64, 198, 134, 86, 198, 198, 242, 7, 118, 247, 38, 198, 66, 16, 236, 17, 236, 17, 236], [0, 87, 229, 146, 149, 238, 102, 21], 19)
[45, 99, 25, 84, 35, 114, 46]
"""
@spec polynomial_division(list, list, integer) :: list
def polynomial_division(msg_poly, gen_poly, data_code_len) do
Stream.iterate(msg_poly, &do_polynomial_division(&1, gen_poly))
|> Enum.at(data_code_len)
end
defp do_polynomial_division([0 | t], _), do: t
defp do_polynomial_division([h | _] = msg, gen_poly) do
Stream.map(gen_poly, &rem(&1 + EQRCode.GaloisField.to_a(h), 255))
|> Enum.map(&EQRCode.GaloisField.to_i/1)
|> pad_zip(msg)
|> Enum.map(fn {a, b} -> a ^^^ b end)
|> tl()
end
defp pad_zip(left, right) do
[short, long] = Enum.sort_by([left, right], &length/1)
Stream.concat(short, Stream.cycle([0]))
|> Stream.zip(long)
end
def bch_encode(data) do
bch = do_bch_encode(EQRCode.Encode.bits(<<data::bits, 0::10>>))
(EQRCode.Encode.bits(data) ++ bch)
|> Stream.zip(EQRCode.Encode.bits(<<@format_mask::15>>))
|> Enum.map(fn {a, b} -> a ^^^ b end)
end
defp do_bch_encode(list) when length(list) == 10, do: list
defp do_bch_encode([0 | t]), do: do_bch_encode(t)
defp do_bch_encode(list) do
EQRCode.Encode.bits(<<@format_generator_polynomial::11>>)
|> Stream.concat(Stream.cycle([0]))
|> Stream.zip(list)
|> Enum.map(fn {a, b} -> a ^^^ b end)
|> do_bch_encode()
end
end | lib/eqrcode/reed_solomon.ex | 0.663124 | 0.471041 | reed_solomon.ex | starcoder |
defmodule PidController do
@moduledoc """
Documentation for PidController.
## Controller action
By default, the controller will produce a direct control action, meaning that
an increasing error term will result in an increasing control value.
If the controller action is set to `:reverse`, an increasing error term
will result in a _decreasing_ control value. (This is generally needed only
if the element being controlled, such as a valve, requires it.)
"""
@type controller_action :: :direct | :reverse
@type state :: %{required(atom()) => any()}
@doc ~S"""
Create a new instance.
"""
@spec new(keyword()) :: state()
def new(initial_values \\ []) do
%{
setpoint: 0.0,
kp: 0.0,
ki: 0.0,
kd: 0.0,
action: :direct,
output_limits: {nil, nil},
# used internally; not meant to be changed by the user
error_sum: 0.0,
last_input: 0.0
}
|> set_setpoint(Keyword.get(initial_values, :setpoint))
|> set_kp(Keyword.get(initial_values, :kp))
|> set_ki(Keyword.get(initial_values, :ki))
|> set_kd(Keyword.get(initial_values, :kd))
|> set_action(Keyword.get(initial_values, :action))
|> set_output_limits(Keyword.get(initial_values, :output_limits))
end
@doc ~S"""
Returns the current setpoint for the controller.
"""
@spec setpoint(state()) :: float()
def setpoint(state), do: state.setpoint
@doc ~S"""
Returns the proportional coefficient (Kp) for the controller.
"""
@spec kp(state()) :: float()
def kp(state), do: state.kp
@doc ~S"""
Returns the integral coefficient (Ki) for the controller.
"""
@spec ki(state()) :: float()
def ki(state), do: state.ki
@doc ~S"""
Returns the derivative coefficient (Kd) for the controller.
"""
@spec kd(state()) :: float()
def kd(state), do: state.kd
@doc ~S"""
Returns the controller action (direct or reverse).
"""
@spec action(state()) :: controller_action()
def action(state), do: state.action
@doc ~S"""
Returns the range to which the control value will be limited, in the form
`{min, max}`. If either value is `nil`, the range is unbounded at that end.
"""
@spec output_limits(state()) :: {float() | nil, float() | nil}
def output_limits(state), do: state.output_limits
@doc ~S"""
Sets the setpoint for the controller. Returns the new state.
"""
@spec set_setpoint(state(), float() | nil) :: state()
def set_setpoint(state, nil), do: state
def set_setpoint(state, new_setpoint), do: %{state | setpoint: new_setpoint}
@doc ~S"""
Sets the proportional coefficient (Kp) for the controller. Returns the new state.
"""
@spec set_kp(state(), float() | nil) :: state()
def set_kp(state, nil), do: state
def set_kp(state, new_kp), do: %{state | kp: new_kp}
@doc ~S"""
Sets the integral coefficient (Ki) for the controller. Returns the new state.
"""
@spec set_ki(state(), float() | nil) :: state()
def set_ki(state, nil), do: state
def set_ki(state, new_ki), do: %{state | ki: new_ki}
@doc ~S"""
Sets the derivative coefficient (Kd) for the controller. Returns the new state.
"""
@spec set_kd(state(), float() | nil) :: state()
def set_kd(state, nil), do: state
def set_kd(state, new_kd), do: %{state | kd: new_kd}
@doc ~S"""
Sets the controller action. Returns the new state.
"""
@spec set_action(state(), controller_action()) :: state()
def set_action(state, new_action) when new_action in [:direct, :reverse],
do: %{state | action: new_action}
def set_action(state, _), do: state
@doc ~S"""
Sets the range to which the control value will be limited, in the form
`{min, max}`. If either value is `nil`, the control value will not be limited
in that direction.
"""
@spec set_output_limits(state(), {float() | nil, float() | nil}) :: state()
def set_output_limits(state, {nil, nil} = new_output_limits),
do: %{state | output_limits: new_output_limits}
def set_output_limits(state, {nil, new_max} = new_output_limits)
when is_float(new_max),
do: %{state | output_limits: new_output_limits}
def set_output_limits(state, {new_min, nil} = new_output_limits)
when is_float(new_min),
do: %{state | output_limits: new_output_limits}
def set_output_limits(state, {new_min, new_max} = new_output_limits)
when is_float(new_min) and is_float(new_max),
do: %{state | output_limits: new_output_limits}
def set_output_limits(state, _), do: state
@doc ~S"""
Calculates the control value from the current process value.
Returns `{:ok, output, state}`.
"""
@spec output(float(), state()) :: {:ok, float(), state()}
def output(input, state) do
{cv, state} = calculate_output(input, state)
{:ok, cv, state}
end
defp action_multiplier(:direct), do: 1.0
defp action_multiplier(:reverse), do: -1.0
defp clamp(value, %{output_limits: {nil, nil}}), do: value
defp clamp(value, %{output_limits: {nil, max}}) when value <= max, do: value
defp clamp(value, %{output_limits: {min, nil}}) when value >= min, do: value
defp clamp(value, %{output_limits: {min, _}}) when value < min, do: min
defp clamp(value, %{output_limits: {_, max}}) when value > max, do: max
defp calculate_output(input, state) do
error = state.setpoint - input
p_term = state.kp * action_multiplier(state.action) * error
i_term = state.ki * action_multiplier(state.action) * (state.error_sum + error)
d_term = state.kd * action_multiplier(state.action) * (input - state.last_input)
output = clamp(p_term + i_term + d_term, state)
{output, %{state | last_input: input, error_sum: clamp(state.error_sum + error, state)}}
end
end | lib/pid_controller.ex | 0.862598 | 0.536495 | pid_controller.ex | starcoder |
defmodule GrassHopper do
@moduledoc """
GrassHopper is a tiny abstraction over GenServer, that helps building
dynamically scheduled recursive processes. It allows to efficiently jump
along timestamps and perform user defined actions. Think of a librarian
going through the list of borrowed books and calling the borrowers
as their leases reach the due date.
User of GrassHopper needs to implement two callbacks:
- `next` is called to compute the next timestamp (a `NaiveDateTime`); returning
`nil` makes the process wait indefinitely, or up to `max_timeout` if configured
- `perform` is called to perform the operation on the given interval
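For example, the librarian described above could be sketched like this (the
`MyApp.Leases` and `MyApp.Notifier` modules are hypothetical):
```elixir
defmodule Librarian do
  use GrassHopper, max_timeout: 60_000

  # Due date of the next lease, or nil when none are pending
  def next(_state), do: MyApp.Leases.next_due_date()

  # Handle every lease that became due in the current interval
  def perform(%{from: from, to: to}) do
    MyApp.Leases.due_between(from, to)
    |> Enum.each(&MyApp.Notifier.call_borrower/1)
  end
end
```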
"""
@type state :: %{
opts: Keyword.t,
from: NaiveDateTime.t,
to: NaiveDateTime.t
}
@callback perform(state) :: any
@callback next(state) :: NaiveDateTime.t | nil
defmacro __using__(global_opts \\ []) do
quote do
use GenServer
@behaviour GrassHopper
def start_link(opts \\ []) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@impl true
def init(local_opts \\ []) do
opts = Keyword.merge(unquote(global_opts), local_opts)
IO.inspect(opts)
timestamp = Keyword.get_lazy(opts, :start_time, fn ->
NaiveDateTime.utc_now()
end)
state = %{opts: opts, from: timestamp, to: timestamp}
{:ok, state, {:continue, []}}
end
@impl true
def handle_continue(_, state) do
now = NaiveDateTime.utc_now()
ts = __MODULE__.next(state)
timeout = if is_nil(ts) do
:infinity
else
NaiveDateTime.diff(ts, now, :millisecond)
end
case GrassHopper.trim_timeout(timeout, state.opts) do
:infinity ->
{:noreply, state}
timeout ->
new_state = %{state | to: NaiveDateTime.add(now, timeout, :millisecond)}
{:noreply, new_state, timeout}
end
end
@impl true
def handle_info(:timeout, state) do
__MODULE__.perform(state)
{:noreply, %{state | from: state.to}, {:continue, []}}
end
def handle_info(:refresh, state) do
{:noreply, state, {:continue, []}}
end
end
end
@spec refresh(atom) :: any
def refresh(dest) do
# TODO: Make it work with distributed actors
if pid = Process.whereis(dest) do
send(pid, :refresh)
end
end
@spec trim_timeout(timeout, Keyword.t) :: timeout
def trim_timeout(timeout, opts \\ []) do
case timeout do
:infinity ->
Keyword.get(opts, :max_timeout, :infinity)
timeout ->
upper = Keyword.get(opts, :min_timeout, 0) |> max(timeout)
Keyword.get(opts, :max_timeout, upper) |> min(upper)
end
end
end | lib/grass_hopper.ex | 0.754779 | 0.468183 | grass_hopper.ex | starcoder |
defmodule Geocalc.Calculator.Area do
@moduledoc false
alias Geocalc.{Calculator, Point, Shape}
@pi :math.pi()
def point_in_area?(area, point) do
coord = to_cartesian_in_plane(area, point)
geometric_function(area, coord) > 0
end
def point_outside_area?(area, point) do
coord = to_cartesian_in_plane(area, point)
geometric_function(area, coord) < 0
end
def point_at_area_border?(area, point) do
coord = to_cartesian_in_plane(area, point)
# Pretty impossible to exactly get 0, so leave a little tolerance
abs(geometric_function(area, coord)) <= 0.01
end
def point_at_center_point?(area, point) do
coord = to_cartesian_in_plane(area, point)
geometric_function(area, coord) == 1
end
@spec area_size(%Shape.Circle{} | %Shape.Rectangle{} | %Shape.Ellipse{}) :: number
def area_size(area) do
case area do
%Shape.Circle{radius: r} -> @pi * r * r
%Shape.Rectangle{long_semi_axis: a, short_semi_axis: b} -> 4 * a * b
%Shape.Ellipse{long_semi_axis: a, short_semi_axis: b} -> @pi * a * b
end
end
defp to_cartesian_in_plane(area, point) do
# Switch coordinates to radian
origin_lat = Calculator.degrees_to_radians(Point.latitude(area))
origin_lon = Calculator.degrees_to_radians(Point.longitude(area))
point_lat = Calculator.degrees_to_radians(Point.latitude(point))
point_lon = Calculator.degrees_to_radians(Point.longitude(point))
# Get earth radius for origin and position
origin_radius = Calculator.earth_radius(Point.latitude(area))
point_radius = Calculator.earth_radius(Point.latitude(point))
# Project coordinates onto cartesian plane
xo = origin_radius * :math.cos(origin_lat) * :math.cos(origin_lon)
yo = origin_radius * :math.cos(origin_lat) * :math.sin(origin_lon)
zo = origin_radius * :math.sin(origin_lat)
xp = point_radius * :math.cos(point_lat) * :math.cos(point_lon)
yp = point_radius * :math.cos(point_lat) * :math.sin(point_lon)
zp = point_radius * :math.sin(point_lat)
# Forward to the plane defined by the origin coordinates
xc = -:math.sin(origin_lon) * (xp - xo) + :math.cos(origin_lon) * (yp - yo)
yc =
-:math.sin(origin_lat) * :math.cos(origin_lon) * (xp - xo) -
:math.sin(origin_lat) * :math.sin(origin_lon) * (yp - yo) +
:math.cos(origin_lat) * (zp - zo)
# Rotate plane
case area do
%Shape.Circle{} -> [xc, yc]
%Shape.Rectangle{} -> rotate([xc, yc], area.angle)
%Shape.Ellipse{} -> rotate([xc, yc], area.angle)
end
end
defp rotate([x, y], azimuth) do
azimuth_radians = Calculator.degrees_to_radians(azimuth)
zenith = @pi / 2 - azimuth_radians
xr = x * :math.cos(zenith) + y * :math.sin(zenith)
yr = -x * :math.sin(zenith) + y * :math.cos(zenith)
[xr, yr]
end
defp geometric_function(%Shape.Circle{radius: r}, [x, y]) do
x_over_r = x / r
y_over_r = y / r
1 - x_over_r * x_over_r - y_over_r * y_over_r
end
defp geometric_function(%Shape.Rectangle{long_semi_axis: a, short_semi_axis: b}, [x, y]) do
x_over_a = x / a
y_over_b = y / b
min(1 - x_over_a * x_over_a, 1 - y_over_b * y_over_b)
end
defp geometric_function(%Shape.Ellipse{long_semi_axis: a, short_semi_axis: b}, [x, y]) do
x_over_a = x / a
y_over_b = y / b
1 - x_over_a * x_over_a - y_over_b * y_over_b
end
end | lib/geocalc/calculator/area.ex | 0.857976 | 0.922062 | area.ex | starcoder |
require Logger
defmodule ExoSQL do
@moduledoc """
Provides a generic, universal SQL parser and executor that can access many
tabular databases and perform SQL queries over them.
The databases can be heterogeneous, so you can perform searches mixing
data from Postgres, MySQL, CSV files or Google Analytics.
For example:
```
iex> {:ok, result} = ExoSQL.query(
...> "SELECT urls.url, status_code FROM urls INNER JOIN request ON request.url = urls.url",
...> %{
...> "A" => {ExoSQL.Csv, path: "test/data/csv/"},
...> "B" => {ExoSQL.HTTP, []}
...> })
...> ExoSQL.format_result(result)
'''
A.urls.url | B.request.status_code
-------------------------------------
https://serverboards.io/e404 | 404
http://www.facebook.com | 302
https://serverboards.io | 200
http://www.serverboards.io | 301
''' |> to_string
```
It also contains functions for all the steps of the process:
`parse` |> `plan` |> `execute`. They can be useful for debugging purposes.
Finally, there are helper functions such as `explain`, which prints out an
explanation of the plan, and `format_result` for pretty-printing results.
"""
defmodule Query do
defstruct select: [],
distinct: nil,
crosstab: false,
from: [],
where: nil,
groupby: nil,
join: nil,
orderby: [],
limit: nil,
offset: nil,
union: nil,
with: []
end
defmodule Result do
defstruct columns: [],
rows: []
end
def parse(sql, context), do: ExoSQL.Parser.parse(sql, context)
def plan(parsed, context), do: ExoSQL.Planner.plan(parsed, context)
def execute(plan, context), do: ExoSQL.Executor.execute(plan, context)
def query(sql, context) do
# Logger.debug(inspect sql)
try do
with {:ok, parsed} <- ExoSQL.Parser.parse(sql, context),
{:ok, plan} <- ExoSQL.Planner.plan(parsed, context),
{:ok, result} <- ExoSQL.Executor.execute(plan, context) do
{:ok, result}
end
rescue
err in MatchError ->
case err.term do
{:error, error} ->
{:error, error}
other ->
{:error, {:match, other}}
end
any ->
{:error, any}
catch
any -> {:error, any}
end
# Logger.debug("parsed #{inspect parsed, pretty: true}")
# Logger.debug("planned #{inspect plan, pretty: true}")
end
def explain(sql, context) do
Logger.info("Explain #{inspect(sql)}")
{:ok, parsed} = ExoSQL.Parser.parse(sql, context)
{:ok, plan} = ExoSQL.Planner.plan(parsed, context)
Logger.info(inspect(plan, pretty: true))
end
def format_result(res), do: ExoSQL.Utils.format_result(res)
def schema("self", _context) do
{:ok, ["tables"]}
end
# Hack to allow internal non-database variables in the context
def schema("__" <> _rest, _context), do: {:ok, []}
def schema(db, context) do
{db, opts} = context[db]
apply(db, :schema, [opts])
end
def schema("self", "tables", _context) do
{:ok,
%{
columns: ["db", "table", "column"]
}}
end
def schema(db, table, context) do
case context[db] do
{db, opts} ->
apply(db, :schema, [opts, table])
nil ->
raise "#{inspect({db, table})} not found at extractors #{inspect(Map.keys(context))}"
end
end
@default_context %{
"A" => {ExoSQL.Csv, path: "test/data/csv/"},
"B" => {ExoSQL.HTTP, []}
}
def repl(context \\ @default_context) do
input = IO.gets("exosql> ") |> String.trim()
case input do
"\q" ->
:eof
"exit" ->
:eof
"quit" ->
:eof
"" ->
repl(context)
_other ->
case query(input, context) do
{:ok, result} ->
IO.puts(format_result(result))
{:error, err} ->
Logger.error(inspect(err))
end
repl(context)
end
end
def debug_mode(context) do
get_in(context, ["__vars__", "debug"])
end
end | lib/exosql.ex | 0.749271 | 0.796728 | exosql.ex | starcoder |
defmodule GenAMQP do
@moduledoc """
GenAMQP is a library to create easily Publish/Subscribe and RPC style in AMQP by defining some settings and using a friendly macro
In the settings file put:
```elixir
config :gen_amqp,
connections: [
{:static, StaticConnSup, ConnHub, "amqp://guest:guest@localhost"}
],
error_handler: ErrorHandler
```
The error handler must handle a failure structure that can be any
```elixir
defmodule ErrorHandler do
def handle(msg) do
Poison.encode!(%{
status: :error,
code: 0,
message: msg
})
end
end
```
The `ServerDemo` below uses `GenAMQP.Server` and implements two functions, `execute` and `handle`. The `execute` function receives the incoming payload as a string and must return the tuple `{:reply, content}`, where `content` is the response that will be sent back over AMQP, or a no-reply value if you don't need to respond. The `handle` function handles the cases not matched in the `execute` function.
```elixir
defmodule ServerDemo do
@moduledoc false
use GenAMQP.Server, event: "server_demo", conn_name: Application.get_env(:gen_amqp, :conn_name)
def execute(payload) do
with {:ok, _} <- {:error, "error"} do
{:reply, "ok"}
end
end
def handle({:error, cause}) do
{:reply, cause}
end
end
```
  The system includes a supervisor for the connection, which can be static or dynamic: a static supervisor supervises a single connection, while a dynamic one creates a new supervised connection for each client.
"""
use Application
def start(_type, _args) do
conns = Application.get_env(:gen_amqp, :connections)
specs = conns_to_specs(conns)
# Define supervisors and child supervisors to be supervised
children = specs
opts = [strategy: :one_for_one, name: GenAMQP.AppSupervisor]
Supervisor.start_link(children, opts)
end
defp conns_to_specs(conns) do
import Supervisor.Spec, warn: false
Enum.map(conns, fn
{:static, sup_name, conns} ->
supervisor(GenAMQP.ConnSupervisor, [sup_name, conns], id: sup_name)
end)
end
end
# (end of lib/gen_amqp.ex)
defmodule Volley do
@moduledoc """
GenStage and Broadway producers for EventStoreDB
Volley provides a GenStage producer `Volley.InOrderSubscription` and a
GenStage/Broadway producer `Volley.PersistentSubscription`. Both of these
subscription producers can read a stream from beginning to end and then
  keep up to date as new events are published to the EventStoreDB.
These producers can be used to build a reactive, event-driven, eventually
consistent system suitable for Event Sourcing. In terms of Event Sourcing,
these producers can be used to build process managers, sagas, and read
models.
## InOrder vs. persistent subscriptions
The `Volley.InOrderSubscription` producer is a simpler subscription
model which uses `Spear.read_stream/3` and `Spear.subscribe/4` to read an
EventStoreDB stream in order. `Volley.InOrderSubscription` is a client-side
subscription: the client is responsible for storing its stream revision.
`Volley.PersistentSubscription`s use the Persistent Subscription feature
of EventStoreDB to store stream revisions and perform back-pressure on the
EventStoreDB server-side. Persistent subscriptions do not have strict
ordering guarantees, which allows features like competing consumers,
batch processing, and message-parking (with a built-in dead letter approach).
See the EventStoreDB documentation on persistent subscriptions and
`Spear.connect_to_persistent_subscription/5` for more information.
InOrder subscriptions have less resource impact on the EventStoreDB but are
less flexible than persistent subscriptions. InOrder subscriptions are subject
to head-of-line blocking: failing to process an event must halt the
subscription in order to keep ordering. Persistent subscriptions offer more
complex subscription strategies and can avoid head-of-line blocking but
handlers may be more complex or difficult to write as they need to account
for events potentially arriving out of order.
Systems do not need to be limited to only one kind of event listener: a mix
of in-order and persistent subscriptions may be wise.
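
  ## Example

  A consumer for either producer type is an ordinary `GenStage` consumer. A
  minimal sketch (the producer name `MyApp.EventProducer` is hypothetical and
  assumed to be started elsewhere in the supervision tree):

      defmodule MyApp.EventHandler do
        use GenStage

        def start_link(opts), do: GenStage.start_link(__MODULE__, opts)

        # Subscribe to the already-running subscription producer.
        def init(_opts), do: {:consumer, :ok, subscribe_to: [MyApp.EventProducer]}

        def handle_events(events, _from, state) do
          # Project the events into a read model here.
          {:noreply, [], state}
        end
      end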
"""
@genserver_option_keys ~w[debug name timeout spawn_opt hibernate_after]a
@producer_option_keys ~w[buffer_size buffer_keep dispatcher demand]a
# coveralls-ignore-start
@doc false
defmacro if_broadway(do: body) do
case Code.ensure_compiled(Broadway) do
{:module, Broadway} ->
body
_ ->
quote(do: :ok)
end
end
# coveralls-ignore-stop
@doc false
def pop_genserver_opts(opts) do
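    # Note: :name is excluded from the drop list, so it remains present in both
    # the extracted GenServer options and the remaining options.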
{Keyword.take(opts, @genserver_option_keys),
Keyword.drop(opts, @genserver_option_keys -- [:name])}
end
@doc false
def pop_producer_opts(opts) do
{Keyword.take(opts, @producer_option_keys),
Keyword.drop(opts, @producer_option_keys)}
end
# coveralls-ignore-start
@doc false
def yes, do: true
# coveralls-ignore-stop
end
# (end of lib/volley.ex)
defmodule Base do
import Bitwise
@moduledoc """
This module provides data encoding and decoding functions
according to [RFC 4648](http://tools.ietf.org/html/rfc4648).
This document defines the commonly used base 16, base 32, and base
64 encoding schemes.
## Base 16 alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| 0| 4| 4| 8| 8| 12| C|
| 1| 1| 5| 5| 9| 9| 13| D|
| 2| 2| 6| 6| 10| A| 14| E|
| 3| 3| 7| 7| 11| B| 15| F|
## Base 32 alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| A| 9| J| 18| S| 27| 3|
| 1| B| 10| K| 19| T| 28| 4|
| 2| C| 11| L| 20| U| 29| 5|
| 3| D| 12| M| 21| V| 30| 6|
| 4| E| 13| N| 22| W| 31| 7|
| 5| F| 14| O| 23| X| | |
| 6| G| 15| P| 24| Y| (pad)| =|
| 7| H| 16| Q| 25| Z| | |
| 8| I| 17| R| 26| 2| | |
## Base 32 (extended hex) alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| 0| 9| 9| 18| I| 27| R|
| 1| 1| 10| A| 19| J| 28| S|
| 2| 2| 11| B| 20| K| 29| T|
| 3| 3| 12| C| 21| L| 30| U|
| 4| 4| 13| D| 22| M| 31| V|
| 5| 5| 14| E| 23| N| | |
| 6| 6| 15| F| 24| O| (pad)| =|
| 7| 7| 16| G| 25| P| | |
| 8| 8| 17| H| 26| Q| | |
## Base 64 alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| A| 17| R| 34| i| 51| z|
| 1| B| 18| S| 35| j| 52| 0|
| 2| C| 19| T| 36| k| 53| 1|
| 3| D| 20| U| 37| l| 54| 2|
| 4| E| 21| V| 38| m| 55| 3|
| 5| F| 22| W| 39| n| 56| 4|
| 6| G| 23| X| 40| o| 57| 5|
| 7| H| 24| Y| 41| p| 58| 6|
| 8| I| 25| Z| 42| q| 59| 7|
| 9| J| 26| a| 43| r| 60| 8|
| 10| K| 27| b| 44| s| 61| 9|
| 11| L| 28| c| 45| t| 62| +|
| 12| M| 29| d| 46| u| 63| /|
| 13| N| 30| e| 47| v| | |
| 14| O| 31| f| 48| w| (pad)| =|
| 15| P| 32| g| 49| x| | |
| 16| Q| 33| h| 50| y| | |
## Base 64 (URL and filename safe) alphabet
| Value | Encoding | Value | Encoding | Value | Encoding | Value | Encoding |
|------:|---------:|------:|---------:|------:|---------:|------:|---------:|
| 0| A| 17| R| 34| i| 51| z|
| 1| B| 18| S| 35| j| 52| 0|
| 2| C| 19| T| 36| k| 53| 1|
| 3| D| 20| U| 37| l| 54| 2|
| 4| E| 21| V| 38| m| 55| 3|
| 5| F| 22| W| 39| n| 56| 4|
| 6| G| 23| X| 40| o| 57| 5|
| 7| H| 24| Y| 41| p| 58| 6|
| 8| I| 25| Z| 42| q| 59| 7|
| 9| J| 26| a| 43| r| 60| 8|
| 10| K| 27| b| 44| s| 61| 9|
| 11| L| 28| c| 45| t| 62| -|
| 12| M| 29| d| 46| u| 63| _|
| 13| N| 30| e| 47| v| | |
| 14| O| 31| f| 48| w| (pad)| =|
| 15| P| 32| g| 49| x| | |
| 16| Q| 33| h| 50| y| | |
"""
b16_alphabet = Enum.with_index '0123456789ABCDEF'
b64_alphabet = Enum.with_index 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
b64url_alphabet = Enum.with_index 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
b32_alphabet = Enum.with_index 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
b32hex_alphabet = Enum.with_index '0123456789ABCDEFGHIJKLMNOPQRSTUV'
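
  # For each {encoder, decoder, alphabet} triple below, one function clause per
  # alphabet character is generated at compile time, e.g. `defp enc16(10), do: ?A`
  # and `defp dec16(?A), do: 10`, plus a catch-all decoder clause that raises on
  # non-alphabet bytes.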
Enum.each [ {:enc16, :dec16, b16_alphabet},
{:enc64, :dec64, b64_alphabet},
{:enc32, :dec32, b32_alphabet},
{:enc64url, :dec64url, b64url_alphabet},
{:enc32hex, :dec32hex, b32hex_alphabet} ], fn({enc, dec, alphabet}) ->
for {encoding, value} <- alphabet do
defp unquote(enc)(unquote(value)), do: unquote(encoding)
defp unquote(dec)(unquote(encoding)), do: unquote(value)
end
defp unquote(dec)(c) do
raise ArgumentError, "non-alphabet digit found: \"#{<<c>>}\" (byte #{c})"
end
end
defp encode_case(:upper, func),
do: func
defp encode_case(:lower, func),
do: &to_lower(func.(&1))
defp decode_case(:upper, func),
do: func
defp decode_case(:lower, func),
do: &func.(from_lower(&1))
defp decode_case(:mixed, func),
do: &func.(from_mixed(&1))
defp to_lower(char) when char in ?A..?Z,
do: char + (?a - ?A)
defp to_lower(char),
do: char
defp from_lower(char) when char in ?a..?z,
do: char - (?a - ?A)
defp from_lower(char) when not char in ?A..?Z,
do: char
defp from_lower(char),
do: raise(ArgumentError, "non-alphabet digit found: \"#{<<char>>}\" (byte #{char})")
defp from_mixed(char) when char in ?a..?z,
do: char - (?a - ?A)
defp from_mixed(char),
do: char
@doc """
Encodes a binary string into a base 16 encoded string.
Accepts an atom `:upper` (default) for encoding to upper case characters or
`:lower` for lower case characters.
## Examples
iex> Base.encode16("foobar")
"666F6F626172"
iex> Base.encode16("foobar", case: :lower)
"666f6f626172"
"""
@spec encode16(binary) :: binary
@spec encode16(binary, Keyword.t) :: binary
def encode16(data, opts \\ []) when is_binary(data) do
case = Keyword.get(opts, :case, :upper)
do_encode16(data, encode_case(case, &enc16/1))
end
@doc """
Decodes a base 16 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
## Examples
iex> Base.decode16("666F6F626172")
{:ok, "foobar"}
iex> Base.decode16("666f6f626172", case: :lower)
{:ok, "foobar"}
iex> Base.decode16("666f6F626172", case: :mixed)
{:ok, "foobar"}
"""
@spec decode16(binary) :: {:ok, binary} | :error
@spec decode16(binary, Keyword.t) :: {:ok, binary} | :error
def decode16(string, opts \\ []) when is_binary(string) do
case = Keyword.get(opts, :case, :upper)
{:ok, do_decode16(string, decode_case(case, &dec16/1))}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 16 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.decode16!("666F6F626172")
"foobar"
iex> Base.decode16!("666f6f626172", case: :lower)
"foobar"
iex> Base.decode16!("666f6F626172", case: :mixed)
"foobar"
"""
@spec decode16!(binary) :: binary
@spec decode16!(binary, Keyword.t) :: binary
def decode16!(string, opts \\ []) when is_binary(string) do
case = Keyword.get(opts, :case, :upper)
do_decode16(string, decode_case(case, &dec16/1))
end
@doc """
Encodes a binary string into a base 64 encoded string.
## Examples
iex> Base.encode64("foobar")
"Zm9vYmFy"
"""
@spec encode64(binary) :: binary
def encode64(data) when is_binary(data) do
do_encode64(data, &enc64/1)
end
@doc """
Decodes a base 64 encoded string into a binary string.
## Examples
iex> Base.decode64("Zm9vYmFy")
{:ok, "foobar"}
"""
@spec decode64(binary) :: {:ok, binary} | :error
def decode64(string) when is_binary(string) do
{:ok, do_decode64(string, &dec64/1)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 64 encoded string into a binary string.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.decode64!("Zm9vYmFy")
"foobar"
"""
@spec decode64!(binary) :: binary
def decode64!(string) when is_binary(string) do
do_decode64(string, &dec64/1)
end
@doc """
Encodes a binary string into a base 64 encoded string with URL and filename
safe alphabet.
## Examples
iex> Base.url_encode64(<<255, 127, 254, 252>>)
"_3_-_A=="
"""
@spec url_encode64(binary) :: binary
def url_encode64(data) when is_binary(data) do
do_encode64(data, &enc64url/1)
end
@doc """
Decodes a base 64 encoded string with URL and filename safe alphabet
into a binary string.
## Examples
iex> Base.url_decode64("_3_-_A==")
{:ok, <<255, 127, 254, 252>>}
"""
@spec url_decode64(binary) :: {:ok, binary} | :error
def url_decode64(string) when is_binary(string) do
{:ok, do_decode64(string, &dec64url/1)}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 64 encoded string with URL and filename safe alphabet
into a binary string.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.url_decode64!("_3_-_A==")
<<255, 127, 254, 252>>
"""
@spec url_decode64!(binary) :: binary
def url_decode64!(string) when is_binary(string) do
do_decode64(string, &dec64url/1)
end
@doc """
Encodes a binary string into a base 32 encoded string.
Accepts an atom `:upper` (default) for encoding to upper case characters or
`:lower` for lower case characters.
## Examples
iex> Base.encode32("foobar")
"MZXW6YTBOI======"
iex> Base.encode32("foobar", case: :lower)
"mzxw6ytboi======"
"""
@spec encode32(binary) :: binary
@spec encode32(binary, Keyword.t) :: binary
def encode32(data, opts \\ []) when is_binary(data) do
case = Keyword.get(opts, :case, :upper)
do_encode32(data, encode_case(case, &enc32/1))
end
@doc """
Decodes a base 32 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
## Examples
iex> Base.decode32("MZXW6YTBOI======")
{:ok, "foobar"}
iex> Base.decode32("mzxw6ytboi======", case: :lower)
{:ok, "foobar"}
iex> Base.decode32("mzXW6ytBOi======", case: :mixed)
{:ok, "foobar"}
"""
@spec decode32(binary) :: {:ok, binary} | :error
@spec decode32(binary, Keyword.t) :: {:ok, binary} | :error
  def decode32(string, opts \\ []) when is_binary(string) do
case = Keyword.get(opts, :case, :upper)
{:ok, do_decode32(string, decode_case(case, &dec32/1))}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 32 encoded string into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.decode32!("MZXW6YTBOI======")
"foobar"
iex> Base.decode32!("mzxw6ytboi======", case: :lower)
"foobar"
iex> Base.decode32!("mzXW6ytBOi======", case: :mixed)
"foobar"
"""
@spec decode32!(binary) :: binary
@spec decode32!(binary, Keyword.t) :: binary
  def decode32!(string, opts \\ []) when is_binary(string) do
case = Keyword.get(opts, :case, :upper)
do_decode32(string, decode_case(case, &dec32/1))
end
@doc """
Encodes a binary string into a base 32 encoded string with an
extended hexadecimal alphabet.
Accepts an atom `:upper` (default) for encoding to upper case characters or
`:lower` for lower case characters.
## Examples
iex> Base.hex_encode32("foobar")
"CPNMUOJ1E8======"
iex> Base.hex_encode32("foobar", case: :lower)
"cpnmuoj1e8======"
"""
@spec hex_encode32(binary) :: binary
@spec hex_encode32(binary, Keyword.t) :: binary
def hex_encode32(data, opts \\ []) when is_binary(data) do
case = Keyword.get(opts, :case, :upper)
do_encode32(data, encode_case(case, &enc32hex/1))
end
@doc """
Decodes a base 32 encoded string with extended hexadecimal alphabet
into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
## Examples
iex> Base.hex_decode32("CPNMUOJ1E8======")
{:ok, "foobar"}
iex> Base.hex_decode32("cpnmuoj1e8======", case: :lower)
{:ok, "foobar"}
iex> Base.hex_decode32("cpnMuOJ1E8======", case: :mixed)
{:ok, "foobar"}
"""
@spec hex_decode32(binary) :: {:ok, binary} | :error
@spec hex_decode32(binary, Keyword.t) :: {:ok, binary} | :error
def hex_decode32(string, opts \\ []) when is_binary(string) do
case = Keyword.get(opts, :case, :upper)
{:ok, do_decode32(string, decode_case(case, &dec32hex/1))}
rescue
ArgumentError -> :error
end
@doc """
Decodes a base 32 encoded string with extended hexadecimal alphabet
into a binary string.
Accepts an atom `:upper` (default) for decoding from upper case characters or
`:lower` for lower case characters. `:mixed` can be given for mixed case
characters.
An `ArgumentError` exception is raised if the padding is incorrect or
a non-alphabet character is present in the string.
## Examples
iex> Base.hex_decode32!("CPNMUOJ1E8======")
"foobar"
iex> Base.hex_decode32!("cpnmuoj1e8======", case: :lower)
"foobar"
iex> Base.hex_decode32!("cpnMuOJ1E8======", case: :mixed)
"foobar"
"""
@spec hex_decode32!(binary) :: binary
@spec hex_decode32!(binary, Keyword.t) :: binary
def hex_decode32!(string, opts \\ []) when is_binary(string) do
case = Keyword.get(opts, :case, :upper)
do_decode32(string, decode_case(case, &dec32hex/1))
end
defp do_encode16(<<>>, _), do: <<>>
defp do_encode16(data, enc) do
for <<c::4 <- data>>, into: <<>>, do: <<enc.(c)::8>>
end
defp do_decode16(<<>>, _), do: <<>>
defp do_decode16(string, dec) when rem(byte_size(string), 2) == 0 do
for <<c1::8, c2::8 <- string>>, into: <<>> do
<<dec.(c1)::4, dec.(c2)::4>>
end
end
defp do_decode16(_, _) do
raise ArgumentError, "odd-length string"
end
defp do_encode64(<<>>, _), do: <<>>
defp do_encode64(data, enc) do
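    # Encode the longest prefix whose byte size is a multiple of 3 in one pass;
    # the 1- or 2-byte remainder is handled below with explicit `=` padding.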
split = 3 * div(byte_size(data), 3)
<<main::size(split)-binary, rest::binary>> = data
main = for <<c::6 <- main>>, into: <<>>, do: <<enc.(c)::8>>
case rest do
<<c1::6, c2::6, c3::4>> ->
<<main::binary, enc.(c1)::8, enc.(c2)::8, enc.(bsl(c3, 2))::8, ?=>>
<<c1::6, c2::2>> ->
<<main::binary, enc.(c1)::8, enc.(bsl(c2, 4))::8, ?=, ?=>>
<<>> ->
main
end
end
defp do_decode64(<<>>, _), do: <<>>
defp do_decode64(string, dec) when rem(byte_size(string), 4) == 0 do
split = byte_size(string) - 4
<<main::size(split)-binary, rest::binary>> = string
main = for <<c::8 <- main>>, into: <<>>, do: <<dec.(c)::6>>
case rest do
<<c1::8, c2::8, ?=, ?=>> ->
<<main::binary, dec.(c1)::6, bsr(dec.(c2), 4)::2>>
<<c1::8, c2::8, c3::8, ?=>> ->
<<main::binary, dec.(c1)::6, dec.(c2)::6, bsr(dec.(c3), 2)::4>>
<<c1::8, c2::8, c3::8, c4::8>> ->
<<main::binary, dec.(c1)::6, dec.(c2)::6, dec.(c3)::6, dec.(c4)::6>>
<<>> ->
main
end
end
defp do_decode64(_, _) do
raise ArgumentError, "incorrect padding"
end
defp do_encode32(<<>>, _), do: <<>>
defp do_encode32(data, enc) do
split = 5 * div(byte_size(data), 5)
<<main::size(split)-binary, rest::binary>> = data
main = for <<c::5 <- main>>, into: <<>>, do: <<enc.(c)::8>>
case rest do
      <<c1::5, c2::5, c3::5, c4::5, c5::5, c6::5, c7::2>> ->
<<main::binary,
enc.(c1)::8, enc.(c2)::8, enc.(c3)::8, enc.(c4)::8,
enc.(c5)::8, enc.(c6)::8, enc.(bsl(c7, 3))::8, ?=>>
      <<c1::5, c2::5, c3::5, c4::5, c5::4>> ->
<<main::binary,
enc.(c1)::8, enc.(c2)::8, enc.(c3)::8, enc.(c4)::8,
enc.(bsl(c5, 1))::8, ?=, ?=, ?=>>
      <<c1::5, c2::5, c3::5, c4::1>> ->
<<main::binary,
enc.(c1)::8, enc.(c2)::8, enc.(c3)::8, enc.(bsl(c4, 4))::8,
?=, ?=, ?=, ?=>>
<<c1::5, c2::3>> ->
<<main::binary,
enc.(c1)::8, enc.(bsl(c2, 2))::8, ?=, ?=,
?=, ?=, ?=, ?=>>
<<>> ->
main
end
end
defp do_decode32(<<>>, _), do: <<>>
defp do_decode32(string, dec) when rem(byte_size(string), 8) == 0 do
split = byte_size(string) - 8
<<main::size(split)-binary, rest::binary>> = string
main = for <<c::8 <- main>>, into: <<>>, do: <<dec.(c)::5>>
case rest do
      <<c1::8, c2::8, ?=, ?=, ?=, ?=, ?=, ?=>> ->
<<main::binary, dec.(c1)::5, bsr(dec.(c2), 2)::3>>
      <<c1::8, c2::8, c3::8, c4::8, ?=, ?=, ?=, ?=>> ->
<<main::binary,
dec.(c1)::5, dec.(c2)::5, dec.(c3)::5, bsr(dec.(c4), 4)::1>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, ?=, ?=, ?=>> ->
<<main::binary,
dec.(c1)::5, dec.(c2)::5, dec.(c3)::5, dec.(c4)::5,
bsr(dec.(c5), 1)::4>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, c6::8, c7::8, ?=>> ->
<<main::binary,
dec.(c1)::5, dec.(c2)::5, dec.(c3)::5, dec.(c4)::5,
dec.(c5)::5, dec.(c6)::5, bsr(dec.(c7), 3)::2>>
<<c1::8, c2::8, c3::8, c4::8, c5::8, c6::8, c7::8, c8::8>> ->
<<main::binary,
dec.(c1)::5, dec.(c2)::5, dec.(c3)::5, dec.(c4)::5,
dec.(c5)::5, dec.(c6)::5, dec.(c7)::5, dec.(c8)::5>>
<<>> ->
main
end
end
defp do_decode32(_, _) do
raise ArgumentError, "incorrect padding"
end
end
# (end of lib/elixir/lib/base.ex)
defmodule AWS.Importexport do
@moduledoc """
AWS Import/Export Service
AWS Import/Export accelerates transferring large amounts of data between
the AWS cloud and portable storage devices that you mail to us. AWS
Import/Export transfers data directly onto and off of your storage devices
using Amazon's high-speed internal network and bypassing the Internet. For
large data sets, AWS Import/Export is often faster than Internet transfer
and more cost effective than upgrading your connectivity.
"""
@doc """
This operation cancels a specified job. Only the job owner can cancel it.
The operation fails if the job has already started or is complete.
"""
def cancel_job(client, input, options \\ []) do
request(client, "CancelJob", input, options)
end
@doc """
This operation initiates the process of scheduling an upload or download of
your data. You include in the request a manifest that describes the data
transfer specifics. The response to the request includes a job ID, which
you can use in other operations, a signature that you use to identify your
storage device, and the address where you should ship your storage device.
"""
def create_job(client, input, options \\ []) do
request(client, "CreateJob", input, options)
end
@doc """
This operation generates a pre-paid UPS shipping label that you will use to
ship your device to AWS for processing.
"""
def get_shipping_label(client, input, options \\ []) do
request(client, "GetShippingLabel", input, options)
end
@doc """
This operation returns information about a job, including where the job is
in the processing pipeline, the status of the results, and the signature
value associated with the job. You can only return information about jobs
you own.
"""
def get_status(client, input, options \\ []) do
request(client, "GetStatus", input, options)
end
@doc """
This operation returns the jobs associated with the requester. AWS
Import/Export lists the jobs in reverse chronological order based on the
date of creation. For example if Job Test1 was created 2009Dec30 and Test2
was created 2010Feb05, the ListJobs operation would return Test2 followed
by Test1.
"""
def list_jobs(client, input, options \\ []) do
request(client, "ListJobs", input, options)
end
@doc """
You use this operation to change the parameters specified in the original
manifest file by supplying a new manifest file. The manifest file attached
to this request replaces the original manifest file. You can only use the
operation after a CreateJob request but before the data transfer starts and
you can only use it on jobs you own.
"""
def update_job(client, input, options \\ []) do
request(client, "UpdateJob", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "importexport",
region: "us-east-1"}
host = build_host("importexport", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-www-form-urlencoded"}
]
input = Map.merge(input, %{"Action" => action, "Version" => "2010-06-01"})
payload = AWS.Util.encode_query(input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{endpoint: endpoint}) do
"#{endpoint_prefix}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# (end of lib/aws/importexport.ex)
defmodule PgRanges.NumRange do
@moduledoc """
Wraps a `Postgrex.Range` and casts to a PostgreSQL `numrange` type.
"""
use PgRanges
@type t :: %__MODULE__{
lower: float(),
lower_inclusive: boolean(),
upper: float(),
upper_inclusive: boolean()
}
@doc false
@impl true
def type, do: :numrange
@doc false
@impl true
@spec from_postgrex(Range.t()) :: __MODULE__.t()
def from_postgrex(
%Range{
lower: %Decimal{},
upper: %Decimal{}
} = range
) do
struct!(__MODULE__, Map.from_struct(range))
end
def from_postgrex(
%Range{
lower: lower,
upper: upper
} = range
) do
fields =
range
|> Map.from_struct()
|> Map.merge(%{
lower: to_decimal(lower),
upper: to_decimal(upper)
})
struct!(__MODULE__, fields)
end
@doc false
@impl true
@spec to_postgrex(__MODULE__.t()) :: Range.t()
def to_postgrex(
%__MODULE__{
lower: %Decimal{},
upper: %Decimal{}
} = range
),
do: struct!(Range, Map.from_struct(range))
def to_postgrex(
%__MODULE__{
lower: lower,
upper: upper
} = range
) do
fields =
range
|> Map.from_struct()
|> Map.merge(%{
lower: to_decimal(lower),
upper: to_decimal(upper)
})
struct!(Range, fields)
end
@doc """
  Creates a new `#{__MODULE__}` struct. It expects the _lower_ and _upper_
  attributes to be floats or values acceptable by `Decimal.new/1`.
  Any acceptable input is converted to a `Decimal`.
## Options
- `lower_inclusive`: should the range be lower inclusive? Default is `true`
- `upper_inclusive`: should the range be upper inclusive? Default is `false`
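
  ## Example

  A quick sketch (result representation shown for illustration only):

      range = PgRanges.NumRange.new("1.5", 4, upper_inclusive: true)
      # => %PgRanges.NumRange{lower: #Decimal<1.5>, upper: #Decimal<4>,
      #      lower_inclusive: true, upper_inclusive: true}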
"""
@spec new(any, any, keyword()) :: __MODULE__.t()
@impl true
def new(lower, upper, opts \\ []) do
fields = Keyword.merge(opts, lower: to_decimal(lower), upper: to_decimal(upper))
struct!(__MODULE__, fields)
end
defp to_decimal(value) when is_float(value), do: Decimal.from_float(value)
defp to_decimal(value), do: Decimal.new(value)
end
# (end of lib/pg_ranges/numrange.ex)
defmodule Statux.Transitions do
@moduledoc """
Handles evaluation and execution of a Transition from one to another or the same status.
"""
alias Statux.Models.EntityStatus
alias Statux.Models.Status
require Logger
@doc """
  Pass in an entity state, the name of the status, a list of transition options,
  and the PubSub configuration

      iex> transition(entity_state, :battery_voltage, [{true, :ok, :low}], pubsub)
      updated_entity_state
to check constraints for the given status_name and options and, if the constraints are
fulfilled, alter the entity_state to the new status.
As a side effect, this function may
1. broadcast PubSub messages, if PubSub is configured, and/or
2. trigger the callback functions provided in the rule set for :enter, :stay, :exit (to be
implemented)
You may use these side effects to react to updates in your application.
"""
def transition(%EntityStatus{} = entity_state, _status_name, [] = _no_valid_options, _pubsub) do
entity_state
end
# One valid option -> Awesome
def transition(%EntityStatus{} = entity_state, status_name, [{transition?, from, to}], %{module: pubsub, topic: topic}) do
same_as_before? = from == to
cond do
transition? and same_as_before? ->
publish(pubsub, topic, {:stay, status_name, to, entity_state.id})
entity_state
transition? and not same_as_before? ->
publish(pubsub, topic, {:exit, status_name, from, entity_state.id})
publish(pubsub, topic, {:enter, status_name, to, entity_state.id})
modify_current_state_in_entity(entity_state, status_name, to)
true ->
entity_state # Constraints not fulfilled, nothing to do.
end
end
# Multiple valid options. How do we choose?! Log error, pick first.
def transition(%EntityStatus{} = entity_state, status_name, [{_true, from, to} = option | _other_options] = options, pubsub) do
Logger.error("Statux conflict: Tried to transition '#{status_name}' from '#{from}' to multiple options #{inspect options |> Enum.map(fn {_, _, option} -> option end)} simultaneously. Defaulting to first option '#{to}'.")
transition(%EntityStatus{} = entity_state, status_name, [option], pubsub)
end
defp publish(nil, _topic, _content), do: :noop
defp publish(_pubsub, nil, _content), do: :noop
defp publish(pubsub, topic, content), do: Phoenix.PubSub.broadcast!(pubsub, topic, content)
defp modify_current_state_in_entity(entity_state, status_name, option) do
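    # Access.key/2 with a %{} default ensures the entry for this status_name is
    # created on its first transition.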
entity_state
|> update_in([:current_status, Access.key(status_name, %{})], fn status ->
Status.transition(status, option)
end)
end
end
# (end of lib/Statux/transitions.ex)
defmodule ExUnit.Callbacks do
@moduledoc %B"""
This module defines four callbacks: `setup_all`, `teardown_all`,
`setup` and `teardown`.
Those callbacks are defined via macros and each one can optionally receive a
keyword list with metadata, usually referred to as `context`. The callback
may optionally put extra data into `context` to be used in the tests.
  **Note**: `setup` and `teardown` callbacks share the same context; it
provides an ExUnit.Test record associated with the `:test` key. `setup_all`
and `teardown_all` share their own context in a similar way, but this one
provides an ExUnit.TestCase record associated with the `:case` key.
If you return `{ :ok, <keyword list> }` from `setup` or `teardown`, the keyword
list will get merged into the context that will be available in all
subsequent `setup`, `test`, or `teardown` calls.
Similarly, returning `{ :ok, <keyword list> }` from `setup_all` or
`teardown_all` will merge the keyword list into the context that will be
available in all subsequent `setup_all` or `teardown_all` calls.
Returning :ok leaves the context unchanged in both cases.
Returning anything else from `setup` or `teardown` will force the current
test to fail, and subsequent `setup`, `test`, and `teardown` callbacks won't
be called for it.
Returning anything else from `setup_all` or `teardown_all` will force the
whole case to fail, and no other callback will be called.
  It is allowed to define multiple `setup` or `teardown` callbacks; they will
  be called sequentially, in the order of definition, before each test. The
  keyword list returned from the last `setup` will be merged into the context
  passed to the `test` and `teardown` (if defined) callbacks.
In the case of `setup_all` and `teardown_all` callbacks, each `setup_all`
will be called only once before the first test's `setup` and each
`teardown_all` will be called once after the last test. The returned keyword
list from the last `setup_all` will get merged into the context passed to the
`teardown_all` callbacks.
## Examples
defmodule AssertionTest do
use ExUnit.Case, async: true
# `setup` is called before each test is run
setup do
IO.puts "This is a setup callback"
# Return extra metadata, it has to be a keyword list
{ :ok, [hello: "world"] }
end
# Same as `setup`, but receives the context for the current test
setup context do
# We can access the test record in the context
IO.puts "Setting up: #{context[:test]}"
# We can also access the data returned from `setup/0`
assert context[:hello] == "world"
# No metadata
:ok
end
# This is called after each test finishes
teardown context do
assert context[:hello] == "world"
:ok
end
test "always pass" do
assert true
end
test "another one", context do
assert context[:hello] == "world"
end
end
"""
@doc false
defmacro __using__(opts) do
parent = opts[:parent]
quote do
@exunit_setup []
@exunit_teardown []
@exunit_setup_all []
@exunit_teardown_all []
@before_compile unquote(__MODULE__)
import unquote(__MODULE__)
def __exunit__(:parent) do
unquote(parent)
end
def __exunit__(:setup, context) do
__exunit_setup__ unquote(parent).__exunit__(:setup, context)
end
def __exunit__(:teardown, context) do
unquote(parent).__exunit__(:teardown, __exunit_teardown__ context)
end
def __exunit__(:setup_all, context) do
__exunit_setup_all__ unquote(parent).__exunit__(:setup_all, context)
end
def __exunit__(:teardown_all, context) do
unquote(parent).__exunit__(:teardown_all, __exunit_teardown_all__ context)
end
end
end
@doc false
defmacro __before_compile__(env) do
[ compile_callbacks(env, :exunit_setup),
compile_callbacks(env, :exunit_teardown),
compile_callbacks(env, :exunit_setup_all),
compile_callbacks(env, :exunit_teardown_all) ]
end
@doc """
Called before the start of each test.
"""
defmacro setup(var // quote(do: _), block) do
quote do
name = :"__exunit_setup_#{length(@exunit_setup)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_setup [name|@exunit_setup]
end
end
@doc """
Called after the finish of each test. Note that if the test crashed with an :exit
message, `teardown` will not be run.
"""
defmacro teardown(var // quote(do: _), block) do
quote do
name = :"__exunit_teardown_#{length(@exunit_teardown)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_teardown [name|@exunit_teardown]
end
end
@doc """
Called before the start of a case, i.e. called once before the first test in
the current module and before any `setup` callbacks.
"""
defmacro setup_all(var // quote(do: _), block) do
quote do
name = :"__exunit_setup_all_#{length(@exunit_setup_all)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_setup_all [name|@exunit_setup_all]
end
end
@doc """
Called once after the last test finishes without emitting an :exit message.
"""
defmacro teardown_all(var // quote(do: _), block) do
quote do
name = :"__exunit_teardown_all_#{length(@exunit_teardown_all)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_teardown_all [name|@exunit_teardown_all]
end
end
## Helpers
@doc false
def __merge__(_mod, other, :ok), do: other
def __merge__(_mod, other, { :ok, data }) when is_list(data), do: Keyword.merge(other, data)
def __merge__(mod, _, failure) do
raise "expected ExUnit callback in #{inspect mod} to return :ok " <>
" or { :ok, data }, got #{inspect failure} instead"
end
defp escape(contents) do
Macro.escape_quoted(contents)
end
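
  # Builds a `__<kind>__/1` function that threads the test context through every
  # registered callback in definition order, merging each callback's returned
  # data into the context via `__merge__/3`.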
defp compile_callbacks(env, kind) do
callbacks = Module.get_attribute(env.module, kind) |> Enum.reverse
acc =
Enum.reduce callbacks, quote(do: context), fn(callback, acc) ->
quote do
context = unquote(acc)
unquote(__MODULE__).__merge__(__MODULE__, context, unquote(callback)(context))
end
end
quote do
defp unquote(:"__#{kind}__")(context), do: unquote(acc)
end
end
end
# (end of lib/ex_unit/lib/ex_unit/callbacks.ex)
defmodule Goodoo do
@moduledoc """
Goodoo is a simple, robust, and highly customizable health check solution written in Elixir.
  Goodoo works by periodically checking the availability of the sub-systems based
  on your configuration, and provides a few APIs to retrieve the report.
To start using Goodoo, create a module:
defmodule MyHealthCheck do
use Goodoo
end
After that, add the module with the desired checkers to the supervisor tree.
Please see the "Checkers" section for all currently supported checkers.
checkers = %{
"primary" => {Goodoo.Checker.EctoSQL, repo: MyPrimaryRepo},
"replica" => {Goodoo.Checker.EctoSQL, repo: MyReplicaRepo},
"persistent_cache" => {Goodoo.Checker.Redix, connection: MyCache}
}
children = [
MyPrimaryRepo,
MyReplicaRepo,
MyCache,
...,
{MyHealthCheck, checkers},
MyEndpoint
]
Supervisor.start_link(children, strategy: :one_for_one, name: MyApp)
Allez, hop! You are "goodoo" to go. To retrieve the health check report,
`list_health_states/1` and `get_health_state/2` can be used.
  Usually you will want to expose an HTTP endpoint for uptime checkers, e.g.
  AWS ALB, Pingdom, etc. This can easily be done with Plug.
defmodule MyRouter do
use Plug.Router
plug :match
plug :dispatch
get "/health" do
healthy? =
Enum.all?(
Goodoo.list_health_states(MyHealthCheck),
fn {_checker_name, {state, _last_checked_at}} ->
state == :healthy
end
)
if healthy? do
send_resp(conn, 200, "Everything is 200 OK")
else
send_resp(conn, 503, "Something is on fire!")
end
end
get "/health/:checker_name" do
case Goodoo.get_health_state(MyHealthCheck, checker_name) do
nil ->
send_resp(conn, 404, "Not found")
{state, _last_checked_at} ->
if state == :healthy do
send_resp(conn, 200, "Service is doing fine")
else
send_resp(conn, 503, "Service is on fire")
end
end
end
end
### Checkers
Goodoo implemented a few common checkers:
* `Goodoo.Checker.EctoSQL` - checkers for works with `Ecto.Repo`.
* `Goodoo.Checker.Redix` - Checker that works with `Redix`.
For more information, please visit the documentation for them accordingly.
Goodoo supports customer checkers, please visit `Goodoo.Checker` for more information.
### Checker scheduling/interval
Goodoo schedules checkers based on the last health state. The default intervals are:
* `:healthy` - next check will be in 30 seconds.
* `:degraded` - next check will be in 10 seconds.
* `:unhealthy` - next check will be in 3 seconds.
You can configure your own strategy with the following example. Please note that missing
intervals will fall back to the defaults.
# `:healthy` and `:degraded` will fall back to defaults.
repo_intervals = %{
unhealthy: 1_000
}
cache_intervals = %{
unhealthy: 1_000,
degraded: 5_000,
healthy: 15_000
}
checkers = %{
"repo" => {Goodoo.Checker.EctoSQL, repo: MyRepo, intervals: repo_intervals},
"cache" => {Goodoo.Checker.EctoSQL, connection: MyCache, intervals: cache_intervals}
}
"""
defmacro __using__(_) do
quote location: :keep do
import Goodoo
def child_spec(checkers) do
Goodoo.Supervisor.child_spec({__MODULE__, checkers})
end
end
end
@doc """
Retrieves all checker statuses of a healthcheck module.
"""
@spec list_health_states(module()) :: %{
Goodoo.Checker.name() => {Goodoo.Checker.health_state(), DateTime.t()}
}
def list_health_states(module) do
module
|> Goodoo.Storage.get_storage_name()
|> Goodoo.Storage.list()
end
@doc """
  Retrieves the status of the checker with the given name for a healthcheck module.
"""
@spec get_health_state(module(), Goodoo.Checker.name()) ::
{Goodoo.Checker.health_state(), DateTime.t()} | nil
def get_health_state(module, name) do
module
|> Goodoo.Storage.get_storage_name()
|> Goodoo.Storage.get(name)
end
end
# (end of lib/goodoo.ex)
defmodule AttributeRepository.Search do
@moduledoc """
Callback for searching resources
"""
@type search_result :: [search_entry()]
@type search_entry :: {AttributeRepository.resource_id(), AttributeRepository.resource()}
@doc """
Search for resources using the filter.
The search filter syntax is the one of RFC7644
(section [query resources](https://tools.ietf.org/html/rfc7644#section-3.4.2)).
When inserting `use AttributeRepository.Search` at the begining of an implementation,
the `search(String.t(), [AttributeRepository.attribute_name()] | :all,
AttributeRepository.run_opts())` function version will be created. That function
automatically parses the `String.t()` query and passes it to the callback below.
## Example
```elixir
iex> AttributeRepositoryRiak.search(~s(first_name co "v" or last_name sw "Le"), :all, run_opts)
[
{"MQNL5ASVNLWZTLJA4MDGHKEXOQ",
%{
"first_name" => "Hervé",
"last_name" => "<NAME>",
"shoe_size" => 48,
"subscription_date" => #DateTime<2017-10-19 12:07:03Z>
}},
{"DKO77TT652NZHXX3WM3ZJBFIC4",
%{
"first_name" => "Claude",
"last_name" => "Leblanc",
"shoe_size" => 43,
"subscription_date" => #DateTime<2014-06-13 04:42:34Z>
}},
{"WCJBCL7SC2THS7TSRXB2KZH7OQ",
%{
"first_name" => "Narivelo",
"last_name" => "Rajaonarimanana",
"newsletter_subscribed" => false,
"shoe_size" => 41,
"subscription_date" => #DateTime<2017-06-06 21:01:43Z>
}}
]
```
"""
@callback search(
AttributeRepository.Search.Filter.t(),
[AttributeRepository.attribute_name()] | :all,
AttributeRepository.run_opts()
) ::
{:ok, search_result()}
| {:error, %AttributeRepository.ReadError{}}
| {:error, %AttributeRepository.UnsupportedError{}}
| {:error, %AttributeRepository.Search.Filter.InvalidError{}}
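
  # The `use` macro below injects a `search/3` clause that accepts a raw
  # RFC 7644 filter string, parses it, and delegates to the implementation's
  # `search/3` callback with the parsed filter.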
defmacro __using__(_opts) do
quote do
def search(filter_str, attributes, run_opts) when is_binary(filter_str) do
case AttributeRepository.Search.Filter.parse(filter_str) do
{:ok, filter} ->
search(filter, attributes, run_opts)
{:error, reason} ->
{:error, AttributeRepository.ReadError.exception(inspect(reason))}
end
end
end
end
end
# (end of lib/attribute_repository/search.ex)
defmodule Kino.JS.Live do
@moduledoc ~S'''
Introduces state and event-driven capabilities to JavaScript
powered kinos.
Make sure to read the introduction to JavaScript kinos in
`Kino.JS` for more context.
Similarly to static kinos, live kinos involve a custom JavaScript
code running in the browser. In fact, this part of the API is the
same. In addition, each live kino has a server process running on
the Elixir side, responsible for maintaining state and able to
communicate with the JavaScript side at any time. Again, to illustrate
the ideas we start with a minimal example.
## Example
We will follow up on our `KinoDocs.HTML` example by adding support
for replacing the content on demand.
defmodule KinoDocs.LiveHTML do
use Kino.JS
use Kino.JS.Live
def new(html) do
Kino.JS.Live.new(__MODULE__, html)
end
def replace(kino, html) do
Kino.JS.Live.cast(kino, {:replace, html})
end
@impl true
def init(html, ctx) do
{:ok, assign(ctx, html: html)}
end
@impl true
def handle_connect(ctx) do
{:ok, ctx.assigns.html, ctx}
end
@impl true
def handle_cast({:replace, html}, ctx) do
broadcast_event(ctx, "replace", html)
{:noreply, assign(ctx, html: html)}
end
asset "main.js" do
"""
export function init(ctx, html) {
ctx.root.innerHTML = html;
ctx.handleEvent("replace", (html) => {
ctx.root.innerHTML = html;
});
}
"""
end
end
Just as before we define a module, this time calling it
`KinoDocs.LiveHTML` for clarity. Note many similarities to
the previous version, we still call `use Kino.JS`, define
the `main.js` file and define the `new(html)` function for
creating a kino instance. As a matter of fact, the initial
result of `KinoDocs.LiveHTML.new(html)` will render exactly
the same as our previous `KinoDocs.HTML.new(html)`.
As for the new bits, we added `use Kino.JS.Live` to define
a live kino server. We use `Kino.JS.Live.new/2` for creating
the kino instance and we implement a few `GenServer`-like
callbacks.
Once the kino server is started with `Kino.JS.Live.new/2`,
the `c:init/2` callback is called with the initial argument.
In this case we store the given `html` in server state.
Whenever the kino is rendered on a new client, the `c:handle_connect/1`
callback is called and it builds the initial data for the
client. In this case, we always return the stored `html`.
This initial data is then passed to the JavaScript `init`
function. Keep in mind that while the server is initialized
once, connect may happen at any point, as the users join/refresh
the page.
Finally, the whole point of our example is the ability to
replace the HTML content directly from the Elixir side and
for this purpose we added the public `replace(kino, html)`
function. Underneath the function uses `cast/2` to message
our server and the message is handled with `c:handle_cast/2`.
In this case we store the new `html` in the server state and
broadcast an event with the new value. On the client side,
we subscribe to those events with `ctx.handleEvent(event, callback)`
to update the page accordingly.
## Event handlers
You must eventually register JavaScript handlers for all events
that the client may receive. However, the registration can be
deferred, if the initialization is asynchronous. For example,
the following is perfectly fine:
```js
export function init(ctx, data) {
fetch(data.someUrl).then((resp) => {
ctx.handleEvent("update", (payload) => {
// ...
});
});
}
```
Or alternatively:
```js
export async function init(ctx, data) {
const response = await fetch(data.someUrl);
ctx.handleEvent("update", (payload) => {
// ...
});
}
```
In such case all incoming events are buffered and dispatched once
the handler is registered.
## Binary payloads
The client-server communication supports binary data, both on
initialization and on custom events. On the server side, a binary
payload has the form of `{:binary, info, binary}`, where `info`
is regular JSON-serializable data that can be sent alongside
the plain binary.
On the client side, a binary payload is represented as `[info, buffer]`,
where `info` is the additional data and `buffer` is the binary
as `ArrayBuffer`.
The following example showcases how to send and receive events
with binary payloads.
defmodule Kino.Binary do
use Kino.JS
use Kino.JS.Live
def new() do
Kino.JS.Live.new(__MODULE__, nil)
end
@impl true
def handle_connect(ctx) do
payload = {:binary, %{message: "hello"}, <<1, 2>>}
{:ok, payload, ctx}
end
@impl true
def handle_event("ping", {:binary, _info, binary}, ctx) do
reply_payload = {:binary, %{message: "pong"}, <<1, 2, binary::binary>>}
broadcast_event(ctx, "pong", reply_payload)
{:noreply, ctx}
end
asset "main.js" do
"""
export function init(ctx, payload) {
console.log("initial data", payload);
ctx.handleEvent("pong", ([info, buffer]) => {
console.log("event data", [info, buffer])
});
const buffer = new ArrayBuffer(2);
const bytes = new Uint8Array(buffer);
bytes[0] = 4;
bytes[1] = 250;
ctx.pushEvent("ping", [{ message: "ping" }, buffer]);
}
"""
end
end
'''
defstruct [:module, :pid, :ref]
alias Kino.JS.Live.Context
@opaque t :: %__MODULE__{module: module(), pid: pid(), ref: Kino.Output.ref()}
@type payload :: term() | {:binary, info :: term(), binary()}
@doc """
Invoked when the server is started.
See `c:GenServer.init/1` for more details.
"""
@callback init(arg :: term(), ctx :: Context.t()) ::
{:ok, ctx :: Context.t()} | {:ok, ctx :: Context.t(), opts :: keyword()}
@doc """
Invoked whenever a new client connects to the server.
The returned data is passed to the JavaScript `init` function
of the connecting client.
"""
@callback handle_connect(ctx :: Context.t()) :: {:ok, payload(), ctx :: Context.t()}
@doc """
Invoked to handle client events.
"""
@callback handle_event(event :: String.t(), payload(), ctx :: Context.t()) ::
{:noreply, ctx :: Context.t()}
@doc """
Invoked to handle asynchronous `cast/2` messages.
See `c:GenServer.handle_cast/2` for more details.
"""
@callback handle_cast(msg :: term(), ctx :: Context.t()) :: {:noreply, ctx :: Context.t()}
@doc """
Invoked to handle synchronous `call/3` messages.
See `c:GenServer.handle_call/3` for more details.
"""
@callback handle_call(msg :: term(), from :: term(), ctx :: Context.t()) ::
{:noreply, ctx :: Context.t()} | {:reply, term(), ctx :: Context.t()}
@doc """
Invoked to handle all other messages.
See `c:GenServer.handle_info/2` for more details.
"""
@callback handle_info(msg :: term(), ctx :: Context.t()) :: {:noreply, ctx :: Context.t()}
@doc """
Invoked when the server is about to exit.
See `c:GenServer.terminate/2` for more details.
"""
@callback terminate(reason, ctx :: Context.t()) :: term()
when reason: :normal | :shutdown | {:shutdown, term} | term
@optional_callbacks init: 2,
handle_event: 3,
handle_call: 3,
handle_cast: 2,
handle_info: 2,
terminate: 2
defmacro __using__(_opts) do
quote location: :keep do
@behaviour Kino.JS.Live
import Kino.JS.Live.Context, only: [assign: 2, update: 3, broadcast_event: 3]
@before_compile Kino.JS.Live
end
end
def __before_compile__(env) do
unless Module.defines?(env.module, {:__assets_info__, 0}) do
message = """
make sure to include Kino.JS in #{inspect(env.module)} and define the necessary assets.
use Kino.JS
See Kino.JS for more details.
"""
IO.warn(message, Macro.Env.stacktrace(env))
end
nil
end
@doc """
Instantiates a live JavaScript kino defined by `module`.
The given `init_arg` is passed to the `init/2` callback when
the underlying kino process is started.
"""
@spec new(module(), term()) :: t()
def new(module, init_arg) do
ref = Kino.Output.random_ref()
{:ok, pid} = Kino.start_child({Kino.JS.Live.Server, {module, init_arg, ref}})
%__MODULE__{module: module, pid: pid, ref: ref}
end
@doc false
@spec js_info(t()) :: Kino.Output.js_info()
def js_info(%__MODULE__{} = kino) do
%{
js_view: %{
ref: kino.ref,
pid: kino.pid,
assets: kino.module.__assets_info__()
},
export: nil
}
end
@doc """
Sends an asynchronous request to the kino server.
See `GenServer.cast/2` for more details.
"""
@spec cast(t(), term()) :: :ok
def cast(kino, term) do
Kino.JS.Live.Server.cast(kino.pid, term)
end
@doc """
Makes a synchronous call to the kino server and waits
for its reply.
See `GenServer.call/3` for more details.
"""
@spec call(t(), term(), timeout()) :: term()
def call(kino, term, timeout \\ 5_000) do
Kino.JS.Live.Server.call(kino.pid, term, timeout)
end
end
# (end of lib/kino/js/live.ex)
defmodule AisFront.Units.Angle do
alias __MODULE__
alias AisFront.Protocols.Convertible
defstruct value: %Decimal{}, unit: :rad
@si_unit :rad
@unit_si_ratio %{rad: 1, dd: :math.pi |> Decimal.cast |> Decimal.div(180)}
@standard_units Map.keys(@unit_si_ratio)
@compound_degree_units [:dm, :dms]
def si_unit(), do: @si_unit
def unit_si_ratio(), do: @unit_si_ratio
def possible_units(), do: @standard_units ++ @compound_degree_units
  # Cast a tuple of numbers to a tuple of Decimals
defp new_decimal_tuple(tuple) do
tuple
|> Tuple.to_list
|> Enum.map(& Decimal.cast(&1))
|> List.to_tuple
end
  # Return the value of the Angle if value and unit are valid arguments for
  # Angle
  defp value_or_error(value, unit) do
    cond do
      unit in @standard_units ->
        {:ok, Decimal.cast(value)}

      unit in @compound_degree_units ->
        # Non-tuple values and tuples of the wrong shape fall through to the
        # catch-all clause below.
        case value do
          {_d, m} = tuple when m >= 0 -> {:ok, new_decimal_tuple(tuple)}
          {_d, m, s} = tuple when m >= 0 and s >= 0 -> {:ok, new_decimal_tuple(tuple)}
          _ -> {:error, "value must be a {d, m} or {d, m, s} tuple when unit is #{unit}"}
        end

      true ->
        {:error, "Bad unit #{unit}"}
    end
  end
@doc """
Check if maybe_angle is a valid Angle
"""
def angle?(%Angle{} = maybe_angle) do
case value_or_error(maybe_angle.value, maybe_angle.unit) do
{:ok, _value} -> true
{:error, _error} -> false
end
end
def angle?(_not_angle), do: false
@doc """
Create a new Angle from value and unit
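
  ## Example

  A quick sketch (90 decimal degrees is π/2 radians, i.e. roughly 1.5708):

      Angle.new(90, :dd) |> Angle.to_rad()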
"""
def new(value, unit \\ @si_unit)
def new(value, unit) do
value = case value_or_error(value, unit) do
{:ok, value} -> value
{:error, reason} -> raise ArgumentError, message: reason
end
%Angle{value: value, unit: unit}
end
@doc """
Convert angle to the rad unit
"""
def to_rad(angle), do: Convertible.convert(angle, :rad)
@doc """
Convert angle to decimal degree unit
"""
def to_dd(angle), do: Convertible.convert(angle, :dd)
@doc """
Convert angle to degree minute unit
"""
def to_dm(angle), do: Convertible.convert(angle, :dm)
@doc """
Convert angle to degree minute second unit
"""
def to_dms(angle), do: Convertible.convert(angle, :dms)
defimpl Convertible do
defp dm_to_dms(%Angle{value: {d, m}, unit: :dm}) do
m_ = Decimal.round(m, 0, :down)
s = m
|> Decimal.sub(m_)
|> Decimal.mult(60)
Angle.new({d,m_,s}, :dms)
end
defp dd_to_dm(%Angle{value: dd, unit: :dd}) do
d = Decimal.round(dd, 0, :down)
m = dd
|> Decimal.sub(d)
|> Decimal.mult(60)
|> Decimal.abs
Angle.new({d,m}, :dm)
end
defp dms_to_dm(%Angle{value: {d,m,s}, unit: :dms}) do
m = s
|> Decimal.div(60)
|> Decimal.add(m)
Angle.new({d,m}, :dm)
end
defp dm_to_dd(%Angle{value: {d,m}, unit: :dm}) do
value = m
|> Decimal.div(60)
|> Decimal.add(d)
Angle.new(value, :dd)
end
def possible_units(_angle), do: Angle.possible_units
def si_unit(_angle), do: Angle.si_unit()
def si?(angle), do: angle.unit == Angle.si_unit
def to_si(%Angle{unit: :dms} = angle), do: angle |> dms_to_dm |> to_si
def to_si(%Angle{unit: :dm} = angle), do: angle |> dm_to_dd |> to_si
def to_si(%Angle{value: value, unit: unit}) do
unit_si_ratio = Angle.unit_si_ratio[unit]
%Angle{
value: Decimal.mult(value, unit_si_ratio),
unit: Angle.si_unit
}
end
@standard_units [:rad, :dd]
@compound_degree_units [:dm, :dms]
@verbose_degrees %{
dms: :degrees_minutes_seconds,
dm: :degrees_minutes,
dd: :decimal_degrees
}
@verbose_degree_values Map.values(@verbose_degrees)
def convert(angle, to_unit) when to_unit in @verbose_degree_values do
convert(angle, @verbose_degrees[to_unit])
end
# Is there a better way ?
def convert(%Angle{unit: :dm} = angle, :dms), do: dm_to_dms(angle)
def convert(%Angle{unit: :dd} = angle, :dm), do: dd_to_dm(angle)
def convert(%Angle{unit: :dms} = angle, :dm), do: dms_to_dm(angle)
def convert(%Angle{unit: :dm} = angle, :dd), do: dm_to_dd(angle)
def convert(%Angle{unit: :dms} = angle, :dd), do: angle |> dms_to_dm |> convert(:dd)
def convert(%Angle{unit: :dd} = angle, :dms), do: angle |> dd_to_dm |> convert(:dms)
def convert(%Angle{} = angle, to_unit) when to_unit in @compound_degree_units do
angle |> convert(:dd) |> convert(to_unit)
end
def convert(%Angle{} = angle, to_unit) when to_unit in @standard_units do
%Angle{value: value} = to_si(angle)
unit_si_ratio = Angle.unit_si_ratio[to_unit]
%Angle{
value: Decimal.div(value, unit_si_ratio),
unit: to_unit
}
end
end
defimpl String.Chars do
@unit_repr %{rad: "rad", dd: "°", min: "'", sec: "''"}
@unit_precision %{rad: 8, dd: 6, min: 5, sec: 4}
def to_string(%Angle{value: {d,m}, unit: :dm}) do
m = m |> Decimal.round(@unit_precision[:min]) |> Decimal.reduce
"#{Decimal.to_string(d, :normal)}#{@unit_repr[:dd]}#{m}#{@unit_repr[:min]}"
end
def to_string(%Angle{value: {d,m,s}, unit: :dms}) do
s = s |> Decimal.round(@unit_precision[:sec]) |> Decimal.reduce
"#{Decimal.to_string(d, :normal)}#{@unit_repr[:dd]}#{m}#{@unit_repr[:min]}#{s}#{@unit_repr[:sec]}"
end
def to_string(%Angle{value: value, unit: unit}) do
value = value |> Decimal.round(@unit_precision[unit]) |> Decimal.reduce
"#{Decimal.to_string(value, :normal)}#{@unit_repr[unit]}"
end
end
end
# (end of lib/ais_front/units/angle.ex)
defmodule ExRabbitMQ.Consumer do
@moduledoc """
A behaviour module that abstracts away the handling of RabbitMQ connections and channels.
It abstracts the handling of message delivery and acknowlegement.
It also provides hooks to allow the programmer to wrap the consumption of a message without having to directly
access the AMPQ interfaces.
For a connection configuration example see `ExRabbitMQ.Connection.Config`.
For a queue configuration example see `ExRabbitMQ.Consumer.QueueConfig`.
#### Example usage for a consumer implementing a `GenServer`
```elixir
defmodule MyExRabbitMQConsumer do
@module __MODULE__
use GenServer
use ExRabbitMQ.Consumer, GenServer
def start_link() do
GenServer.start_link(@module, :ok)
end
def init(state) do
new_state =
xrmq_init(:my_connection_config, :my_queue_config, state)
|> xrmq_extract_state()
{:ok, new_state}
end
# required override
def xrmq_basic_deliver(payload, meta, state) do
# your message delivery logic goes here...
{:noreply, state}
end
# optional override when there is a need to do setup the channel right after the connection has been established.
def xrmq_channel_setup(channel, state) do
# any channel setup goes here...
{:ok, state}
end
  # optional override when there is a need to set up the queue and/or exchange just before consuming.
def xrmq_queue_setup(channel, queue, state) do
# The default queue setup uses the exchange, exchange_opts, bind_opts and qos_opts from
    # the queue's configuration to set up the QoS, declare the exchange and bind it with the queue.
    # You can override this function, but you can also keep the automatic queue setup by
    # calling super, eg:
{:ok, state} = super(channel, queue, state)
# any other queue setup goes here...
end
end
```
"""
@doc """
Initiates a connection or reuses an existing one.
When a connection is established then a new channel is opened.
Next, `c:xrmq_channel_setup/2` is called to do any extra work on the opened channel.
If `start_consuming` is `true` then `c:xrmq_consume/1` is called automatically.
This variant accepts atoms as the arguments for the `connection_key` and `queue_key` parameters,
and uses these atoms to read the consumer's configuration.
The wrapper process's state is passed in to allow the callback to mutate it if overriden.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_init(
connection_key :: atom,
queue_key :: atom,
start_consuming :: boolean,
state :: term
) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
Initiates a connection or reuses an existing one.
When a connection is established then a new channel is opened.
Next, `c:xrmq_channel_setup/2` is called to do any extra work on the opened channel.
If `start_consuming` is `true` then `c:xrmq_consume/1` is called automatically.
This variant accepts an atom as the argument for the `connection_key` parameter and
a `ExRabbitMQ.Consumer.QueueConfig` struct as the argument for the `queue_config` parameter.
The `connection_key` atom argument is used to read the connection's configuration.
The wrapper process's state is passed in to allow the callback to mutate it if overriden.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_init(
connection_key :: atom,
queue_config :: struct,
start_consuming :: boolean,
state :: term
) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
Initiates a connection or reuses an existing one.
When a connection is established then a new channel is opened.
Next, `c:xrmq_channel_setup/2` is called to do any extra work on the opened channel.
If `start_consuming` is `true` then `c:xrmq_consume/1` is called automatically.
This variant accepts an `ExRabbitMQ.Connection` struct as the argument for the `connection_config` parameter and
an atom as the argument for the `queue_key` parameter.
The `queue_key` atom argument is used to read the queue's configuration.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_init(
connection_config :: struct,
queue_key :: atom,
start_consuming :: boolean,
state :: term
) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
Initiates a connection or reuses an existing one.
When a connection is established then a new channel is opened.
Next, `c:xrmq_channel_setup/2` is called to do any extra work on the opened channel.
If `start_consuming` is `true` then `c:xrmq_consume/1` is called automatically.
This variant accepts an `ExRabbitMQ.Connection` and an `ExRabbitMQ.Consumer.QueueConfig` struct
as the arguments for the `connection_config` and `queue_config` parameters.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_init(
connection_config :: struct,
queue_config :: struct,
start_consuming :: boolean,
state :: term
) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
Returns a part of the `:exrabbitmq` configuration section, specified with the
`key` argument.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_get_env_config(key :: atom) :: keyword
@doc """
Returns the connection configuration as it was passed to `c:xrmq_init/4`.
This configuration is set in the wrapper process's dictionary.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_get_connection_config() :: term
@doc """
Returns the queue configuration as it was passed to `c:xrmq_init/4`.
This configuration is set in the wrapper process's dictionary.
For the configuration format see the top section of `ExRabbitMQ.Consumer`.
"""
@callback xrmq_get_queue_config() :: term
@doc """
This hook is called when a connection has been established and a new channel has been opened.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_channel_setup(channel :: term, state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This hook is called when a connection has been established and a new channel has been opened,
right after `c:xrmq_channel_setup/2`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_channel_open(channel :: term, state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This hook is called automatically if `start_consuming` was `true` when `c:xrmq_init/4` was called.
If not, then the user has to call it to start consuming.
It is invoked when a connection has been established and a new channel has been opened.
Its flow is to:
1. Declare the queue
2. Run `c:xrmq_queue_setup/3`
3. Start consuming from the queue
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_consume(state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This hook is called automatically as part of the flow in `c:xrmq_consume/1`.
It allows the user to run extra queue setup steps when the queue has been declared.
The default queue setup uses the exchange, exchange_opts, bind_opts and qos_opts from
the queue's configuration to set up the QoS, declare the exchange and bind it to the queue.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_queue_setup(channel :: term, queue :: String.t(), state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This callback is the only required callback (i.e., without any default implementation) and
is called as a response to a `:basic_consume` message.
It is passed the `payload` of the request as well as the `meta` object of the message.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_deliver(payload :: term, meta :: term, state :: term) ::
{:noreply, new_state :: term}
| {:noreply, new_state :: term, timeout | :hibernate}
| {:noreply, [event :: term], new_state :: term}
| {:noreply, [event :: term], new_state :: term, :hibernate}
| {:stop, reason :: term, new_state :: term}
@doc """
This overridable hook is called as a response to a `:basic_cancel` message.
It is passed the `cancellation_info` of the request and by default it logs an error and
returns `{:stop, :basic_cancel, state}`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_cancel(cancellation_info :: any, state :: any) ::
{:noreply, new_state :: term}
| {:noreply, new_state :: term, timeout | :hibernate}
| {:noreply, [event :: term], new_state :: term}
| {:noreply, [event :: term], new_state :: term, :hibernate}
| {:stop, reason :: term, new_state :: term}
@doc """
This overridable function can be called whenever `no_ack` is set to `false` and the user
wants to *ack* a message.
It is passed the `delivery_tag` of the request and by default it simply *acks* the message
as per the RabbitMQ API.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_ack(delivery_tag :: String.t(), state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This overridable function can be called whenever `no_ack` is set to `false` and the user wants
to reject a message.
It is passed the `delivery_tag` of the request and by default it simply rejects the message
as per the RabbitMQ API.
This function simply calls `c:xrmq_basic_reject/3` with `opts` set to `[]`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_reject(delivery_tag :: String.t(), state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This overridable function can be called whenever `no_ack` is set to `false` and the user wants
to reject a message.
It is passed the `delivery_tag` of the request and by default it simply rejects the message
as per the RabbitMQ API.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_reject(delivery_tag :: String.t(), opts :: term, state :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term, new_state :: term}
@doc """
This overridable function publishes the `payload` to the `exchange` using the provided `routing_key`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
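For example (a sketch; the exchange and routing key names are illustrative):
:ok = xrmq_basic_publish(~s({"hello":"world"}), "my_exchange", "my_routing_key", [])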
"""
@callback xrmq_basic_publish(
payload :: term,
exchange :: String.t(),
routing_key :: String.t(),
opts :: [term]
) ::
:ok
| {:error, reason :: :blocked | :closing | :no_channel}
@doc """
Helper function that extracts the `state` argument from the passed in tuple.
"""
@callback xrmq_extract_state({:ok, state :: term} | {:error, reason :: term, state :: term}) ::
state :: term
require ExRabbitMQ.AST.Common
require ExRabbitMQ.AST.Consumer.GenServer
require ExRabbitMQ.AST.Consumer.GenStage
# credo:disable-for-next-line
defmacro __using__({:__aliases__, _, [kind]})
when kind in [:GenServer, :GenStage] do
common_ast = ExRabbitMQ.AST.Common.ast()
inner_ast =
if kind === :GenServer do
ExRabbitMQ.AST.Consumer.GenServer.ast()
else
ExRabbitMQ.AST.Consumer.GenStage.ast()
end
# credo:disable-for-next-line
quote location: :keep do
require Logger
@behaviour ExRabbitMQ.Consumer
alias ExRabbitMQ.Constants
alias ExRabbitMQ.Connection
alias ExRabbitMQ.Connection.Config, as: ConnectionConfig
alias ExRabbitMQ.Consumer.QueueConfig
alias ExRabbitMQ.ChannelRipper
unquote(inner_ast)
def xrmq_init(connection_config_spec, queue_config_spec, start_consuming \\ true, state)
def xrmq_init(connection_key, queue_key, start_consuming, state)
when is_atom(connection_key) and is_atom(queue_key) do
xrmq_init(
xrmq_get_connection_config(connection_key),
xrmq_get_queue_config(queue_key),
start_consuming,
state
)
end
def xrmq_init(connection_key, %QueueConfig{} = queue_config, start_consuming, state)
when is_atom(connection_key) do
xrmq_init(
xrmq_get_connection_config(connection_key),
queue_config,
start_consuming,
state
)
end
def xrmq_init(%ConnectionConfig{} = connection_config, queue_key, start_consuming, state)
when is_atom(queue_key) do
xrmq_init(connection_config, xrmq_get_queue_config(queue_key), start_consuming, state)
end
def xrmq_init(
%ConnectionConfig{} = connection_config,
%QueueConfig{} = queue_config,
start_consuming,
state
) do
connection_config = xrmq_set_connection_config_defaults(connection_config)
queue_config = xrmq_set_queue_config_defaults(queue_config)
connection_pids_group_name = Constants.connection_pids_group_name()
connection_pids =
case :pg2.get_local_members(connection_pids_group_name) do
[] -> []
[_pid | _rest_pids] = pids -> pids
{:error, {:no_such_group, ^connection_pids_group_name}} -> []
end
connection_pid =
case Enum.find(connection_pids, fn c -> Connection.subscribe(c, connection_config) end) do
nil ->
{:ok, pid} = ExRabbitMQ.Connection.Supervisor.start_child(connection_config)
Connection.subscribe(pid, connection_config)
pid
pid ->
pid
end
Process.link(connection_pid)
xrmq_set_connection_pid(connection_pid)
xrmq_set_connection_config(connection_config)
xrmq_set_queue_config(queue_config)
{:ok, channel_ripper_pid} = ChannelRipper.start()
xrmq_set_channel_ripper_pid(channel_ripper_pid)
if start_consuming do
xrmq_open_channel_consume(state)
else
xrmq_open_channel(state)
end
end
defp xrmq_open_channel_consume(state) do
with {:ok, state} <- xrmq_open_channel(state),
{:ok, state} = result_ok <- xrmq_consume(state) do
result_ok
else
error -> error
end
end
def xrmq_consume(state) do
{channel, _} = xrmq_get_channel_info()
config = xrmq_get_queue_config()
if channel === nil or config === nil do
nil
else
with {:ok, %{queue: queue}} <-
AMQP.Queue.declare(channel, config.queue, config.queue_opts),
{:ok, state} <- xrmq_queue_setup(channel, queue, state),
{:ok, _} <- AMQP.Basic.consume(channel, queue, nil, config.consume_opts) do
{:ok, state}
else
{:error, _reason, _state} = error -> error
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
end
def xrmq_queue_setup(channel, queue, state) do
config = xrmq_get_queue_config()
with :ok <- xrmq_qos_setup(channel, config),
:ok <- xrmq_exchange_declare(channel, config),
:ok <- xrmq_queue_bind(channel, queue, config) do
{:ok, state}
else
{:error, reason} -> {:error, reason, state}
end
end
defp xrmq_qos_setup(_channel, %{qos_opts: []}) do
:ok
end
defp xrmq_qos_setup(channel, %{qos_opts: opts}) do
AMQP.Basic.qos(channel, opts)
end
defp xrmq_exchange_declare(_channel, %{exchange: nil}) do
:ok
end
defp xrmq_exchange_declare(channel, %{exchange: exchange, exchange_opts: opts}) do
AMQP.Exchange.declare(channel, exchange, opts[:type] || :direct, opts)
end
defp xrmq_queue_bind(_channel, _queue, %{exchange: nil}) do
:ok
end
defp xrmq_queue_bind(channel, queue, %{exchange: exchange, bind_opts: opts}) do
AMQP.Queue.bind(channel, queue, exchange, opts)
end
def xrmq_basic_ack(delivery_tag, state) do
case xrmq_get_channel_info() do
{nil, _} ->
{:error, Constants.no_channel_error(), state}
{channel, _} ->
try do
case AMQP.Basic.ack(channel, delivery_tag) do
:ok -> {:ok, state}
error -> {:error, error, state}
end
catch
:exit, reason ->
{:error, reason, state}
end
end
end
def xrmq_basic_reject(delivery_tag, state) do
xrmq_basic_reject(delivery_tag, [], state)
end
def xrmq_basic_reject(delivery_tag, opts, state) do
case xrmq_get_channel_info() do
{nil, _} ->
{:error, Constants.no_channel_error(), state}
{channel, _} ->
try do
case AMQP.Basic.reject(channel, delivery_tag, opts) do
:ok -> {:ok, state}
error -> {:error, error, state}
end
catch
:exit, reason ->
{:error, reason, state}
end
end
end
defp xrmq_set_queue_config_defaults(%QueueConfig{} = config) do
%QueueConfig{
queue: config.queue || "",
queue_opts: config.queue_opts || [],
consume_opts: config.consume_opts || [],
exchange: config.exchange || nil,
exchange_opts: config.exchange_opts || [],
bind_opts: config.bind_opts || [],
qos_opts: config.qos_opts || []
}
end
def xrmq_get_queue_config() do
Process.get(Constants.queue_config_key())
end
defp xrmq_get_queue_config(key) do
config = xrmq_get_env_config(key)
%QueueConfig{
queue: config[:queue],
queue_opts: config[:queue_opts],
consume_opts: config[:consume_opts],
exchange: config[:exchange],
exchange_opts: config[:exchange_opts],
bind_opts: config[:bind_opts],
qos_opts: config[:qos_opts]
}
end
defp xrmq_set_queue_config(config) do
if config === nil do
Process.delete(Constants.queue_config_key())
else
Process.put(Constants.queue_config_key(), config)
end
end
unquote(common_ast)
defoverridable xrmq_queue_setup: 3,
xrmq_basic_cancel: 2,
xrmq_basic_ack: 2,
xrmq_basic_reject: 2,
xrmq_basic_reject: 3
end
end
end | lib/ex_rabbit_m_q/consumer.ex | 0.885854 | 0.811974 | consumer.ex | starcoder |
defprotocol Calendar.ContainsDate do
@doc """
Returns a Calendar.Date struct for the struct in question
"""
def date_struct(data)
end
defmodule Calendar.Date do
@moduledoc """
The Date module provides a struct to represent a simple date: year, month and day.
"""
@doc """
Takes a Date struct and returns an erlang style date tuple.
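iex> from_erl!({2014,12,27}) |> to_erl
{2014, 12, 27}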
"""
def to_erl(date) do
date = date |> contained_date
{date.year, date.month, date.day}
end
@doc """
Takes an erlang-style date tuple and returns a tuple with an `:ok` tag and a
Date struct. If the provided date is invalid, an `{:error, :invalid_date}`
tuple is returned instead, as shown below:
iex> from_erl({2014,12,27})
{:ok, %Date{day: 27, month: 12, year: 2014}}
iex> from_erl({2014,99,99})
{:error, :invalid_date}
"""
def from_erl({year, month, day}) do
case :calendar.valid_date({year, month, day}) do
true -> {:ok, %Date{year: year, month: month, day: day}}
false -> {:error, :invalid_date}
end
end
@doc """
Like `from_erl/1`, but does not return a tuple with a tag. Instead, it returns
just a Date if valid, or raises an exception if the provided date is invalid.
iex> from_erl! {2014,12,27}
%Date{day: 27, month: 12, year: 2014}
"""
def from_erl!(erl_date) do
{:ok, date} = from_erl(erl_date)
date
end
@doc """
Takes a Date struct and returns the number of days in the month of that date.
The day of the date provided does not matter - the result is based on the
month and the year.
iex> from_erl!({2014,12,27}) |> number_of_days_in_month
31
iex> from_erl!({2015,2,27}) |> number_of_days_in_month
28
iex> from_erl!({2012,2,27}) |> number_of_days_in_month
29
"""
def number_of_days_in_month(date) do
date = date |> contained_date
{year, month, _} = Calendar.ContainsDate.date_struct(date) |> to_erl
:calendar.last_day_of_the_month(year, month)
end
@doc """
Takes a Date struct and returns a tuple with the ISO week number
and the year that the week belongs to.
Note that the year returned is not always the same as the year provided
as an argument.
iex> from_erl!({2014, 12, 31}) |> week_number
{2015, 1}
iex> from_erl!({2014, 12, 27}) |> week_number
{2014, 52}
iex> from_erl!({2016, 1, 3}) |> week_number
{2015, 53}
"""
def week_number(date) do
date
|> contained_date
|> to_erl
|> :calendar.iso_week_number
end
@doc """
Takes a year and an ISO week number and returns a list with the dates in that week.
iex> dates_for_week_number(2015, 1)
[%Date{day: 29, month: 12, year: 2014}, %Date{day: 30, month: 12, year: 2014},
%Date{day: 31, month: 12, year: 2014}, %Date{day: 1, month: 1, year: 2015},
%Date{day: 2, month: 1, year: 2015}, %Date{day: 3, month: 1, year: 2015},
%Date{day: 4, month: 1, year: 2015}]
iex> dates_for_week_number(2015, 2)
[%Date{day: 5, month: 1, year: 2015}, %Date{day: 6, month: 1, year: 2015},
%Date{day: 7, month: 1, year: 2015}, %Date{day: 8, month: 1, year: 2015},
%Date{day: 9, month: 1, year: 2015}, %Date{day: 10, month: 1, year: 2015},
%Date{day: 11, month: 1, year: 2015}]
iex> dates_for_week_number(2015, 53)
[%Date{day: 28, month: 12, year: 2015}, %Date{day: 29, month: 12, year: 2015},
%Date{day: 30, month: 12, year: 2015}, %Date{day: 31, month: 12, year: 2015},
%Date{day: 1, month: 1, year: 2016}, %Date{day: 2, month: 1, year: 2016},
%Date{day: 3, month: 1, year: 2016}]
"""
def dates_for_week_number(year, week_num) do
days = days_after_until(from_erl!({year-1, 12, 23}), from_erl!({year, 12, 31})) |> Enum.to_list
days = days ++ first_seven_dates_of_year(year)
days
|> Enum.filter(fn(x) -> in_week?(x, year, week_num) end)
end
defp first_seven_dates_of_year(year) do
[ from_erl!({year+1, 1, 1}),
from_erl!({year+1, 1, 2}),
from_erl!({year+1, 1, 3}),
from_erl!({year+1, 1, 4}),
from_erl!({year+1, 1, 5}),
from_erl!({year+1, 1, 6}),
from_erl!({year+1, 1, 7}),
]
end
@doc "Like dates_for_week_number/2 but takes a tuple of {year, week_num} instead"
def dates_for_week_number({year, week_num}), do: dates_for_week_number(year, week_num)
@doc """
Takes a date, a year and an ISO week number and returns true if the date is in
the week.
iex> {2015, 1, 1} |> in_week?(2015, 1)
true
iex> {2015, 5, 5} |> in_week?(2015, 1)
false
"""
def in_week?(date, year, week_num) do
date |> week_number == {year, week_num}
end
@doc """
Takes a Date struct and returns the number of gregorian days since year 0.
iex> from_erl!({2014,12,27}) |> to_gregorian_days
735959
"""
def to_gregorian_days(date) do
date = date |> contained_date
:calendar.date_to_gregorian_days(date.year, date.month, date.day)
end
defp from_gregorian_days!(days) do
:calendar.gregorian_days_to_date(days) |> from_erl!
end
@doc """
Takes a Date struct and returns another one representing the next day.
iex> from_erl!({2014,12,27}) |> next_day!
%Date{day: 28, month: 12, year: 2014}
iex> from_erl!({2014,12,31}) |> next_day!
%Date{day: 1, month: 1, year: 2015}
"""
def next_day!(date) do
advance!(date, 1)
end
@doc """
Takes a Date struct and returns another one representing the previous day.
iex> from_erl!({2014,12,27}) |> prev_day!
%Date{day: 26, month: 12, year: 2014}
"""
def prev_day!(date) do
advance!(date, -1)
end
@doc """
Difference in days between two dates.
Takes two Date structs: `first_date` and `second_date`.
Subtracts `second_date` from `first_date`.
iex> from_erl!({2014,12,27}) |> diff(from_erl!({2014,12,20}))
7
iex> from_erl!({2014,12,27}) |> diff(from_erl!({2014,12,29}))
-2
"""
def diff(first_date_cont, second_date_cont) do
first_date = contained_date(first_date_cont)
second_date = contained_date(second_date_cont)
to_gregorian_days(first_date) - to_gregorian_days(second_date)
end
@doc """
Returns true if the first date is before the second date
iex> from_erl!({2014,12,27}) |> before?(from_erl!({2014,12,20}))
false
iex> from_erl!({2014,12,27}) |> before?(from_erl!({2014,12,29}))
true
"""
def before?(first_date_cont, second_date_cont) do
diff(first_date_cont, second_date_cont) < 0
end
@doc """
Returns true if the first date is after the second date
iex> from_erl!({2014,12,27}) |> after?(from_erl!({2014,12,20}))
true
iex> from_erl!({2014,12,27}) |> after?(from_erl!({2014,12,29}))
false
"""
def after?(first_date_cont, second_date_cont) do
diff(first_date_cont, second_date_cont) > 0
end
@doc """
Takes two variables that contain a date.
Returns true if the dates are the same.
iex> from_erl!({2014,12,27}) |> same_date?(from_erl!({2014,12,27}))
true
iex> from_erl!({2014,12,27}) |> same_date?({2014,12,27})
true
iex> from_erl!({2014,12,27}) |> same_date?(from_erl!({2014,12,29}))
false
"""
def same_date?(first_date_cont, second_date_cont) do
diff(first_date_cont, second_date_cont) == 0
end
@doc """
Advances `date` by `days` number of days.
## Examples
# Date struct advanced by 3 days
iex> from_erl!({2014,12,27}) |> advance(3)
{:ok, %Date{day: 30, month: 12, year: 2014} }
# Date struct turned back 2 days
iex> from_erl!({2014,12,27}) |> advance(-2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
# Date tuple turned back 2 days
iex> {2014,12,27} |> advance(-2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
# When passing a DateTime, NaiveDateTime or datetime tuple
# the time part is ignored. A Date struct is returned.
iex> {{2014,12,27}, {21,30,59}} |> Calendar.NaiveDateTime.from_erl! |> advance(-2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
iex> {{2014,12,27}, {21,30,59}} |> advance(-2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
"""
def advance(date, days) when is_integer(days) do
date = date |> contained_date
result =
(to_gregorian_days(date) + days)
|> from_gregorian_days!()
{:ok, result}
end
def add(date, days), do: advance(date, days)
def add!(date, days), do: advance!(date, days)
@doc """
Subtract `days` number of days from date.
## Examples
# Date struct turned back 2 days
iex> from_erl!({2014,12,27}) |> subtract(2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
# Date tuple turned back 2 days
iex> {2014,12,27} |> subtract(2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
# When passing a DateTime, Calendar.NaiveDateTime or datetime tuple
# the time part is ignored. A Date struct is returned.
iex> {{2014,12,27}, {21,30,59}} |> Calendar.NaiveDateTime.from_erl! |> subtract(2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
iex> {{2014,12,27}, {21,30,59}} |> subtract(2)
{:ok, %Date{day: 25, month: 12, year: 2014} }
"""
def subtract(date, days), do: advance(date, -1 * days)
def subtract!(date, days), do: advance!(date, -1 * days)
@doc """
Like `advance/2`, but returns the result directly - not tagged with :ok.
This function might raise an error.
## Examples
iex> from_erl!({2014,12,27}) |> advance!(3)
%Date{day: 30, month: 12, year: 2014}
iex> {2014,12,27} |> advance!(-2)
%Date{day: 25, month: 12, year: 2014}
"""
def advance!(date, days) when is_integer(days) do
date = date |> contained_date
{:ok, result} = advance(date, days)
result
end
@doc """
DEPRECATED. Use `Calendar.Strftime.strftime!/3` instead - it works the same way.
"""
def strftime!(date, string, lang \\ :en) do
IO.puts :stderr, "Warning: strftime!/3 in Calendar.Date is deprecated." <>
"The function has been moved so use Calendar.Strftime.strftime! instead. " <>
Exception.format_stacktrace()
date = contained_date(date)
date_erl = date |> to_erl
{date_erl, {0, 0, 0}}
|> Calendar.NaiveDateTime.from_erl!
|> Calendar.Strftime.strftime!(string, lang)
end
@doc """
Stream of dates after the date provided as argument.
iex> days_after({2014,12,27}) |> Enum.take(6)
[%Date{day: 28, month: 12, year: 2014}, %Date{day: 29, month: 12, year: 2014},
%Date{day: 30, month: 12, year: 2014}, %Date{day: 31, month: 12, year: 2014}, %Date{day: 1, month: 1, year: 2015},
%Date{day: 2, month: 1, year: 2015}]
"""
def days_after(from_date) do
from_date = from_date |> contained_date
Stream.unfold(next_day!(from_date), fn n -> {n, n |> next_day!} end)
end
@doc """
Stream of dates before the date provided as argument.
iex> days_before(from_erl!({2014,12,27})) |> Enum.take(3)
[%Date{day: 26, month: 12, year: 2014}, %Date{day: 25, month: 12, year: 2014},
%Date{day: 24, month: 12, year: 2014}]
"""
def days_before(from_date) do
from_date = from_date |> contained_date
Stream.unfold(prev_day!(from_date), fn n -> {n, n |> prev_day!} end)
end
@doc """
Get a stream of dates. Takes a starting date and an end date. Includes end date.
Does not include start date unless `true` is passed
as the third argument.
iex> days_after_until({2014,12,27}, {2014,12,29}) |> Enum.to_list
[%Date{day: 28, month: 12, year: 2014}, %Date{day: 29, month: 12, year: 2014}]
iex> days_after_until({2014,12,27}, {2014,12,29}, true) |> Enum.to_list
[%Date{day: 27, month: 12, year: 2014}, %Date{day: 28, month: 12, year: 2014}, %Date{day: 29, month: 12, year: 2014}]
"""
def days_after_until(from_date, until_date, include_from_date \\ false)
def days_after_until(from_date, until_date, _include_from_date = false) do
from_date = from_date |> contained_date
until_date = until_date |> contained_date
Stream.unfold(next_day!(from_date), fn n ->
if n == next_day!(until_date), do: nil, else: {n, next_day!(n)}
end)
end
def days_after_until(from_date, until_date, _include_from_date = true) do
before_from_date = from_date |> contained_date |> prev_day!
days_after_until(before_from_date, until_date)
end
@doc """
Get a stream of dates going back in time. Takes a starting date and an end date. Includes end date.
End date should be before start date.
Does not include start date unless `true` is passed
as the third argument.
iex> days_before_until({2014,12,27}, {2014,12,24}) |> Enum.to_list
[%Date{day: 26, month: 12, year: 2014}, %Date{day: 25, month: 12, year: 2014}, %Date{day: 24, month: 12, year: 2014}]
iex> days_before_until({2014,12,27}, {2014,12,24}, false) |> Enum.to_list
[%Date{day: 26, month: 12, year: 2014}, %Date{day: 25, month: 12, year: 2014}, %Date{day: 24, month: 12, year: 2014}]
iex> days_before_until({2014,12,27}, {2014,12,24}, true) |> Enum.to_list
[%Date{day: 27, month: 12, year: 2014}, %Date{day: 26, month: 12, year: 2014}, %Date{day: 25, month: 12, year: 2014}, %Date{day: 24, month: 12, year: 2014}]
"""
def days_before_until(from_date, until_date, include_from_date \\ false)
def days_before_until(from_date, until_date, _include_from_date = false) do
from_date = from_date |> contained_date
until_date = until_date |> contained_date
Stream.unfold(prev_day!(from_date), fn n ->
if n == prev_day!(until_date), do: nil, else: {n, prev_day!(n)}
end)
end
def days_before_until(from_date, until_date, _include_from_date = true) do
from_date
|> contained_date
|> next_day!
|> days_before_until(until_date)
end
@doc """
Day of the week as an integer. Monday is 1, Tuesday is 2 and so on.
ISO-8601. Sunday is 7.
Results can be between 1 and 7.
See also `day_of_week_zb/1`
## Examples
iex> {2015, 7, 6} |> day_of_week # Monday
1
iex> {2015, 7, 7} |> day_of_week # Tuesday
2
iex> {2015, 7, 5} |> day_of_week # Sunday
7
"""
def day_of_week(date) do
date
|> contained_date
|> to_erl
|> :calendar.day_of_the_week
end
@doc """
The name of the day of the week as a string.
Takes a language code as the second argument. Defaults to :en for English.
## Examples
iex> {2015, 7, 6} |> day_of_week_name # Monday
"Monday"
iex> {2015, 7, 7} |> day_of_week_name # Tuesday
"Tuesday"
iex> {2015, 7, 5} |> day_of_week_name # Sunday
"Sunday"
"""
def day_of_week_name(date, lang\\:en) do
date
|> contained_date
|> Calendar.Strftime.strftime!("%A", lang)
end
@doc """
Day of the week as an integer with Sunday being 0.
Monday is 1, Tuesday is 2 and so on. Results can be
between 0 and 6.
## Examples
iex> {2015, 7, 5} |> day_of_week_zb # Sunday
0
iex> {2015, 7, 6} |> day_of_week_zb # Monday
1
iex> {2015, 7, 7} |> day_of_week_zb # Tuesday
2
"""
def day_of_week_zb(date) do
num = date |> day_of_week
case num do
7 -> 0
_ -> num
end
end
@doc """
Day number in year for provided `date`.
## Examples
iex> {2015, 1, 1} |> day_number_in_year
1
iex> {2015, 2, 1} |> day_number_in_year
32
# 2015 has 365 days
iex> {2015, 12, 31} |> day_number_in_year
365
# 2000 was leap year and had 366 days
iex> {2000, 12, 31} |> day_number_in_year
366
"""
def day_number_in_year(date) do
date = date |> contained_date
day_count_previous_months =
date.month
|> previous_months_for_month()
|> Enum.map(fn month -> :calendar.last_day_of_the_month(date.year, month) end)
|> Enum.sum()
day_count_previous_months + date.day
end
# a list or range of previous month numbers
defp previous_months_for_month(1), do: []
defp previous_months_for_month(month) do
1..(month-1)
end
@doc """
Returns `true` if the `date` is a Monday.
## Examples
iex> {2015, 7, 6} |> monday?
true
iex> {2015, 7, 7} |> monday?
false
"""
def monday?(date), do: day_of_week(date) == 1
@doc """
Returns `true` if the `date` is a Tuesday.
## Examples
iex> {2015, 7, 6} |> tuesday?
false
iex> {2015, 7, 7} |> tuesday?
true
"""
def tuesday?(date), do: day_of_week(date) == 2
@doc """
Returns `true` if the `date` is a Wednesday.
## Examples
iex> {2015, 7, 8} |> wednesday?
true
iex> {2015, 7, 9} |> wednesday?
false
"""
def wednesday?(date), do: day_of_week(date) == 3
@doc """
Returns `true` if the `date` is a Thursday.
## Examples
iex> {2015, 7, 9} |> thursday?
true
iex> {2015, 7, 7} |> thursday?
false
"""
def thursday?(date), do: day_of_week(date) == 4
@doc """
Returns `true` if the `date` is a Friday.
## Examples
iex> {2015, 7, 10} |> friday?
true
iex> {2015, 7, 7} |> friday?
false
"""
def friday?(date), do: day_of_week(date) == 5
@doc """
Returns `true` if the `date` is a Saturday.
## Examples
iex> {2015, 7, 11} |> saturday?
true
iex> {2015, 7, 7} |> saturday?
false
"""
def saturday?(date), do: day_of_week(date) == 6
@doc """
Returns `true` if the `date` is a Sunday.
## Examples
iex> {2015, 7, 12} |> sunday?
true
iex> {2015, 7, 7} |> sunday?
false
"""
def sunday?(date), do: day_of_week(date) == 7
@doc """
## Examples
iex> from_ordinal(2015, 1)
{:ok, %Date{day: 1, month: 1, year: 2015}}
iex> from_ordinal(2015, 270)
{:ok, %Date{day: 27, month: 9, year: 2015}}
iex> from_ordinal(2015, 999)
{:error, :invalid_ordinal_date}
"""
def from_ordinal(year, ordinal_day) do
list =
days_after_until({year - 1, 12, 31}, {year, 12, 31})
|> Enum.to_list()
do_from_ordinal(year, ordinal_day, list)
end
defp do_from_ordinal(year, ordinal_day, [head|tail]) do
if day_number_in_year(head) == ordinal_day do
{:ok, head}
else
do_from_ordinal(year, ordinal_day, tail)
end
end
defp do_from_ordinal(_, _, []), do: {:error, :invalid_ordinal_date}
@doc """
## Examples
iex> from_ordinal!(2015, 1)
%Date{day: 1, month: 1, year: 2015}
iex> from_ordinal!(2015, 270)
%Date{day: 27, month: 9, year: 2015}
iex> from_ordinal!(2015, 365)
%Date{day: 31, month: 12, year: 2015}
"""
def from_ordinal!(year, ordinal_day) do
{:ok, result} = from_ordinal(year, ordinal_day)
result
end
@doc """
Returns a string with the date in ISO format.
## Examples
iex> {2015, 7, 12} |> to_s
"2015-07-12"
iex> {2015, 7, 7} |> to_s
"2015-07-07"
"""
def to_s(date) do
date
|> contained_date
|> Calendar.Strftime.strftime!("%Y-%m-%d")
end
@doc """
Returns the date for the time right now in UTC.
## Examples
> today_utc
%Date{day: 1, month: 3, year: 2016}
"""
def today_utc do
Calendar.DateTime.now_utc
|> Calendar.DateTime.to_date
end
@doc """
Returns the date for the time right now in the provided timezone.
## Examples
> today!("America/Montevideo")
%Date{day: 1, month: 3, year: 2016}
> today!("Australia/Sydney")
%Date{day: 2, month: 3, year: 2016}
"""
def today!(timezone) do
timezone
|> Calendar.DateTime.now!
|> Calendar.DateTime.to_date
end
defp contained_date(date_container), do: Calendar.ContainsDate.date_struct(date_container)
end
defimpl Calendar.ContainsDate, for: Calendar.Date do
def date_struct(data), do: data
end
defimpl Calendar.ContainsDate, for: Calendar.DateTime do
def date_struct(data) do
data |> Calendar.DateTime.to_date
end
end
defimpl Calendar.ContainsDate, for: Calendar.NaiveDateTime do
def date_struct(data) do
data |> Calendar.NaiveDateTime.to_date
end
end
defimpl Calendar.ContainsDate, for: Tuple do
def date_struct({y, m, d}) when y > 23, do: Calendar.Date.from_erl!({y, m, d})
def date_struct({y, _m, _d}) when y <= 23, do: raise "date_struct/1 was called. ContainsDate protocol is not supported for 3-element-tuples where the year is 23 or less. This is to avoid accidentally trying to use a time tuple as a date. If you want to work with a date from the year 23 or earlier, consider using a Calendar.Date struct instead."
def date_struct({{y, m, d}, {_hour, _min, _sec}}), do: Calendar.Date.from_erl!({y, m, d})
def date_struct({{y, m, d}, {_hour, _min, _sec, _usec}}), do: Calendar.Date.from_erl!({y, m, d})
end
defimpl Calendar.ContainsDate, for: Date do
def date_struct(%{calendar: Calendar.ISO}=data), do: %Date{day: data.day, month: data.month, year: data.year}
end
defimpl Calendar.ContainsDate, for: DateTime do
def date_struct(%{calendar: Calendar.ISO}=data), do: %Date{day: data.day, month: data.month, year: data.year}
end
defimpl Calendar.ContainsDate, for: NaiveDateTime do
def date_struct(%{calendar: Calendar.ISO}=data), do: %Date{day: data.day, month: data.month, year: data.year}
end | data/web/deps/calendar/lib/calendar/date.ex | 0.806777 | 0.679059 | date.ex | starcoder |
defmodule Membrane.Element.GCloud.SpeechToText.SamplesQueue do
@moduledoc false
alias Membrane.Payload
@type samples_num :: non_neg_integer()
defstruct q: Qex.new(), total: 0, limit: :infinity
@opaque t :: %__MODULE__{
q: Qex.t({samples_num(), Payload.t()}),
total: samples_num(),
limit: :infinity | samples_num()
}
@doc """
Creates a new SamplesQueue. Accepts a `:limit` option that determines
the maximal amount of samples stored in the queue.
"""
@spec new(Keyword.t()) :: t()
def new(opts \\ []) do
args = opts |> Keyword.take([:limit])
struct!(__MODULE__, args)
end
@spec from_list([{samples_num(), Payload.t()}], Keyword.t()) :: t()
def from_list(content, opts \\ []) do
args = opts |> Keyword.take([:limit])
total =
content
|> Enum.reduce(0, fn {samples, _payload}, acc ->
acc + samples
end)
struct!(__MODULE__, args ++ [q: Qex.new(content), total: total])
end
@doc """
Returns total number of samples in payloads stored in a queue.
"""
@spec samples(t()) :: samples_num()
def samples(%__MODULE__{total: samples}) do
samples
end
@doc """
Puts the payload in the queue along with the number of samples it contains.
If the total number of samples stored in this queue would rise above
the defined `:limit`, the oldest payloads are dropped.
If the number of samples in the pushed payload is greater than `:limit`, the queue will become empty!
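A usage sketch (aliasing this module as `SamplesQueue`; the payloads here are
plain binaries):
iex> q = SamplesQueue.new(limit: 10)
iex> q = SamplesQueue.push(q, <<1>>, 6)
iex> q = SamplesQueue.push(q, <<2>>, 6)
iex> SamplesQueue.payloads(q)
[<<2>>]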
"""
@spec push(t(), Payload.t(), samples_num()) :: t()
def push(%__MODULE__{q: q, total: total_samples} = timed_queue, payload, samples) do
q = q |> Qex.push({samples, payload})
total_samples = total_samples + samples
%{timed_queue | q: q, total: total_samples}
|> pop_until_limit()
end
defp pop_until_limit(%__MODULE__{total: samples, limit: limit} = tq)
when samples <= limit do
tq
end
defp pop_until_limit(%__MODULE__{total: total_samples, limit: limit} = tq)
when total_samples > limit do
{{samples, _}, q} = Qex.pop!(tq.q)
%{tq | q: q, total: total_samples - samples} |> pop_until_limit()
end
@doc """
Drops payloads from the queue with a total number of samples less than or equal to the provided number.
In other words, marks the provided number of samples as disposable and drops the payloads
that contain only disposable samples.
Returns a tuple with the number of dropped samples and updated queue.
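A usage sketch (aliasing this module as `SamplesQueue`):
iex> q = SamplesQueue.from_list([{4, <<1>>}, {4, <<2>>}])
iex> {dropped, q} = SamplesQueue.drop_old_samples(q, 5)
iex> {dropped, SamplesQueue.payloads(q)}
{4, [<<2>>]}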
"""
@spec drop_old_samples(t(), samples_num()) :: {samples_num(), t()}
def drop_old_samples(%__MODULE__{} = timed_queue, samples) do
do_drop_old_samples(timed_queue, samples)
end
defp do_drop_old_samples(%__MODULE__{q: q} = timed_queue, samples_to_drop, acc \\ 0) do
{popped, new_q} = q |> Qex.pop()
case popped do
:empty ->
{acc, timed_queue}
{:value, {samples, _payload}} when samples_to_drop < samples ->
{acc, timed_queue}
{:value, {samples, _payload}} ->
timed_queue
|> Map.update!(:total, &(&1 - samples))
|> Map.put(:q, new_q)
|> do_drop_old_samples(samples_to_drop - samples, acc + samples)
end
end
@doc """
Returns the most recent `{samples, payload}` entries stored in the queue, together containing up to the provided number of samples.
"""
@spec peek_by_samples(t(), samples_num()) :: [{samples_num(), Payload.t()}]
def peek_by_samples(%__MODULE__{q: q}, samples) do
do_peek(q, samples, [])
end
defp do_peek(_q, samples, acc) when samples <= 0 do
acc
end
defp do_peek(q, samples, acc) do
{popped, new_q} = q |> Qex.pop_back()
case popped do
:empty ->
acc
{:value, {popped_samples, _payload}} when popped_samples > samples ->
acc
{:value, {popped_samples, _payload} = entry} ->
do_peek(new_q, samples - popped_samples, [entry | acc])
end
end
@doc """
Returns a list of payloads stored in a queue
"""
@spec payloads(t()) :: [Payload.t()]
def payloads(%__MODULE__{q: q}) do
q |> Enum.to_list() |> Enum.map(&elem(&1, 1))
end
@doc """
Returns a list of payloads stored in a queue and makes the queue empty.
"""
@spec flush(t()) :: {[Payload.t()], t()}
def flush(%__MODULE__{} = tq) do
payloads = tq |> payloads()
{payloads, %{tq | q: Qex.new(), total: 0}}
end
end | lib/gcloud_speech_to_text/samples_queue.ex | 0.924253 | 0.588121 | samples_queue.ex | starcoder |
defmodule ExDhcp.Utils do
@moduledoc """
Utilities that make the DHCP module easy to use. Typespecs for data types
and binary and string conversions for ip and mac addresses
"""
@typedoc "erlang-style ip addresses"
@type ip4 :: :inet.ip4_address
@typedoc "mac addresses in the same style as the erlang ip address."
@type mac :: {byte, byte, byte, byte, byte, byte}
@doc """
represents an erlang-style ip4 value as a string, without going through
a list intermediate.
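```elixir
iex> ExDhcp.Utils.ip2str({192, 168, 0, 1})
"192.168.0.1"
```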
"""
@spec ip2str(ip4) :: binary
def ip2str({a, b, c, d}), do: "#{a}.#{b}.#{c}.#{d}"
@doc """
converts an erlang-style ip address (4-tuple of bytes) to a
binary stored ip address (as in the dhcp packet spec).
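```elixir
iex> ExDhcp.Utils.ip2bin({192, 168, 0, 1})
<<192, 168, 0, 1>>
```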
"""
@spec ip2bin(ip4) :: <<_::32>>
def ip2bin({a, b, c, d}), do: <<a, b, c, d>>
@doc """
converts a binary stored ip address (in the dhcp packet spec) to
an erlang-style ip address. (4-tuple of bytes)
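```elixir
iex> ExDhcp.Utils.bin2ip(<<192, 168, 0, 1>>)
{192, 168, 0, 1}
```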
"""
@spec bin2ip(<<_::32>>) :: ip4
def bin2ip(<<a, b, c, d>>), do: {a, b, c, d}
@doc """
converts a binary stored mac address (in the dhcp packet spec) to
an erlang-style mac address. (6-tuple of bytes)
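```elixir
iex> ExDhcp.Utils.bin2mac(<<1, 2, 3, 16, 255, 254>>)
{1, 2, 3, 16, 255, 254}
```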
"""
def bin2mac(<<a, b, c, d, e, f>>), do: {a, b, c, d, e, f}
@doc """
converts an erlang-style mac address (6-tuple of bytes) to a
binary stored mac address (in the dhcp packet spec).
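```elixir
iex> ExDhcp.Utils.mac2bin({1, 2, 3, 16, 255, 254})
<<1, 2, 3, 16, 255, 254>>
```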
"""
def mac2bin({a, b, c, d, e, f}), do: <<a, b, c, d, e, f>>
@doc """
converts a mac address 6-byte tuple to a string.
```elixir
iex> ExDhcp.Utils.mac2str({1, 2, 3, 16, 255, 254})
"01:02:03:10:FF:FE"
```
"""
def mac2str(mac = {_, _, _, _, _, _}) do
mac
|> Tuple.to_list
|> Enum.map(&padhex/1)
|> Enum.join(":")
end
@doc """
converts a mac address string into a raw binary value
```elixir
iex> ExDhcp.Utils.str2mac("01:02:03:10:FF:FE")
{1, 2, 3, 16, 255, 254}
```
"""
def str2mac(<<a::16, ":", b::16, ":", c::16, ":", d::16, ":", e::16, ":", f::16>>) do
[<<a::16>>, <<b::16>>, <<c::16>>, <<d::16>>, <<e::16>>, <<f::16>>]
|> Enum.map(&String.to_integer(&1, 16))
|> List.to_tuple
end
defp padhex(v) when v < 16, do: "0" <> Integer.to_string(v, 16)
defp padhex(v), do: Integer.to_string(v, 16)
@spec cidr2mask(cidr :: 0..32) :: ip4
@doc """
creates a subnet mask from a cidr value
```elixir
iex> ExDhcp.Utils.cidr2mask(24)
{255, 255, 255, 0}
```
"""
def cidr2mask(n) do
import Bitwise
<<a, b, c, d>> = <<-1 <<< (32 - n)::32>>
{a, b, c, d}
end
end | lib/ex_dhcp/utils.ex | 0.847495 | 0.776072 | utils.ex | starcoder |
defmodule Herald.Message do
@moduledoc """
Defines the behaviour to Message Schemas.
Message Schemas are the structs which represents data
exchanged using a Broker queue.
Any message which you expect receive or send
in your application must be represented by a module,
and this module must `use/2` `Herald.Message`
and defines a `payload/1` for the represented
message, as bellow:
```
defmodule MyApp.UserRegistered do
use Herald.Message
payload do
field :age, :integer
field :name, :string, required: true
end
end
```
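With the schema above, a message can be created and validated like this
(a sketch; the queue name and payload values are illustrative):
```
MyApp.UserRegistered.new("user_registered", %{"name" => "Jane", "age" => 42})
#=> %MyApp.UserRegistered{queue: "user_registered",
#=>   payload: %{name: "Jane", age: 42}, valid?: true, ...}
```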
Each message received in the broker is
converted in a struct with the following fields:
* `id` - A unique UUID for message. Can
be used to filter duplicated messages;
* `queue` - The queue where this message
is received of will be sent;
* `payload` - Content of message, represented
by a map with atom keys, as defined by the
`payload/1` block in the module definition;
* `valid?` - Indicates if message is valid,
eg, with all fields have correct type, and
if required fields are present.
The definition of which Message Schema will be
used to represents a message received from broker
must be created in Router. More details,
see `Herald.Router`.
"""
@doc false
defmacro __using__(_opts) do
quote do
@schema %{}
@required []
@before_compile Herald.Message
@derive {Jason.Encoder, except: [:errors, :valid?]}
defstruct [
id: nil,
queue: nil,
errors: [],
payload: nil,
valid?: false
]
@type t :: %__MODULE__{
id: UUID.t(),
queue: binary(),
payload: map(),
valid?: boolean()
}
import Ecto.Changeset
import Herald.Message
end
end
@doc """
Defines the valid payload for a
message, i.e. wrap all `field/3` calls
"""
defmacro payload(do: block) do
block
end
@doc """
Defines a field of payload.
Fields receives a `name`, `type`, and
can have aditional options.
### Options
* `required` - A `boolean` indicating
if field is required to be present or
not.
"""
defmacro field(name, type, opts \\ []) do
quote do
name = unquote(name)
type = unquote(type)
opts = unquote(opts)
if Keyword.get(opts, :required, false) do
@required [name | @required]
end
@schema Map.put(@schema, name, type)
end
end
defmacro __before_compile__(_env) do
quote do
@doc """
Create new message and validate their payload
"""
@spec new(binary(), map(), any()) :: t()
def new(queue, payload, opts \\ [])
when is_binary(queue) and is_map(payload) do
%__MODULE__{}
|> set_message_id(opts)
|> Map.put(:queue, queue)
|> Map.put(:payload, payload)
|> validate_payload()
end
@doc """
Create new message from a JSON string
Basically, it decodes the JSON and forwards it to `new/3`
"""
@spec from_string(binary(), map(), any()) :: t()
def from_string(queue, payload, opts \\ []) do
case Jason.decode(payload) do
{:ok, decoded} ->
new(queue, decoded, opts)
another ->
another
end
end
defp set_message_id(%__MODULE__{} = message, opts) do
case Keyword.get(opts, :id) do
nil -> Map.put(message, :id, UUID.uuid4())
id -> Map.put(message, :id, id)
end
end
defp validate_payload(%__MODULE__{payload: payload} = message) do
%{valid?: valid, errors: errors, changes: changes} =
{Map.new(), @schema}
|> cast(payload, Map.keys(@schema))
|> validate_required(@required)
message
|> Map.put(:valid?, valid)
|> Map.put(:errors, errors)
|> Map.put(:payload, changes)
end
if Mix.env() == :test do
def schema(), do: @schema
end
end
end
end | lib/herald/message.ex | 0.886377 | 0.800185 | message.ex | starcoder |
defmodule AWS.ServiceQuotas do
@moduledoc """
Service Quotas is a web service that you can use to manage many of your AWS
service quotas.
Quotas, also referred to as limits, are the maximum values for a resource, item,
or operation. This guide provide descriptions of the Service Quotas actions that
you can call from an API. For the Service Quotas user guide, which explains how
to use Service Quotas from the console, see [What is Service Quotas](https://docs.aws.amazon.com/servicequotas/latest/userguide/intro.html).
AWS provides SDKs that consist of libraries and sample code for programming
languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs
provide a convenient way to create programmatic access to Service Quotas and
AWS. For information about the AWS SDKs, including how to download and install
them, see the [Tools for Amazon Web Services](https://docs.aws.amazon.com/aws.amazon.com/tools) page.
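A minimal usage sketch (client construction follows the aws-elixir conventions;
the quota code shown is illustrative):
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
{:ok, result, _http_response} =
AWS.ServiceQuotas.get_service_quota(client, %{
"ServiceCode" => "ec2",
"QuotaCode" => "L-1216C47A"
})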
"""
@doc """
Associates the Service Quotas template with your organization so that when new
accounts are created in your organization, the template submits increase
requests for the specified service quotas.
Use the Service Quotas template to request an increase for any adjustable quota
value. After you define the Service Quotas template, use this operation to
associate, or enable, the template.
"""
def associate_service_quota_template(client, input, options \\ []) do
request(client, "AssociateServiceQuotaTemplate", input, options)
end
@doc """
Removes a service quota increase request from the Service Quotas template.
"""
def delete_service_quota_increase_request_from_template(client, input, options \\ []) do
request(client, "DeleteServiceQuotaIncreaseRequestFromTemplate", input, options)
end
@doc """
Disables the Service Quotas template.
Once the template is disabled, it does not request quota increases for new
accounts in your organization. Disabling the quota template does not apply the
quota increase requests from the template.
## Related operations
* To enable the quota template, call
`AssociateServiceQuotaTemplate`.
* To delete a specific service quota from the template, use
`DeleteServiceQuotaIncreaseRequestFromTemplate`.
"""
def disassociate_service_quota_template(client, input, options \\ []) do
request(client, "DisassociateServiceQuotaTemplate", input, options)
end
@doc """
Retrieves the default service quotas values.
The Value returned for each quota is the AWS default value, even if the quotas
have been increased.
"""
def get_a_w_s_default_service_quota(client, input, options \\ []) do
request(client, "GetAWSDefaultServiceQuota", input, options)
end
@doc """
Retrieves the `ServiceQuotaTemplateAssociationStatus` value from the service.
Use this action to determine if the Service Quota template is associated, or
enabled.
"""
def get_association_for_service_quota_template(client, input, options \\ []) do
request(client, "GetAssociationForServiceQuotaTemplate", input, options)
end
@doc """
Retrieves the details for a particular increase request.
"""
def get_requested_service_quota_change(client, input, options \\ []) do
request(client, "GetRequestedServiceQuotaChange", input, options)
end
@doc """
Returns the details for the specified service quota.
This operation provides a different Value than the `GetAWSDefaultServiceQuota`
operation. This operation returns the applied value for each quota.
`GetAWSDefaultServiceQuota` returns the default AWS value for each quota.
"""
def get_service_quota(client, input, options \\ []) do
request(client, "GetServiceQuota", input, options)
end
@doc """
Returns the details of the service quota increase request in your template.
"""
def get_service_quota_increase_request_from_template(client, input, options \\ []) do
request(client, "GetServiceQuotaIncreaseRequestFromTemplate", input, options)
end
@doc """
Lists all default service quotas for the specified AWS service or all AWS
services.
ListAWSDefaultServiceQuotas is similar to `ListServiceQuotas` except for the
Value object. The Value object returned by `ListAWSDefaultServiceQuotas` is the
default value assigned by AWS. This request returns a list of all service quotas
for the specified service. For each quota listed, the value shown is the default
value that AWS provides.
Always check the `NextToken` response parameter when calling any of the `List*`
operations. These operations can return an unexpected list of results, even when
there are more results available. When this happens, the `NextToken` response
parameter contains a value to pass the next call to the same API to request the
next part of the list.
"""
def list_a_w_s_default_service_quotas(client, input, options \\ []) do
request(client, "ListAWSDefaultServiceQuotas", input, options)
end
@doc """
Requests a list of the changes to quotas for a service.
"""
def list_requested_service_quota_change_history(client, input, options \\ []) do
request(client, "ListRequestedServiceQuotaChangeHistory", input, options)
end
@doc """
Requests a list of the changes to specific service quotas.
This command provides additional granularity over the
`ListRequestedServiceQuotaChangeHistory` command. Once a quota change request
has reached `CASE_CLOSED, APPROVED,` or `DENIED`, the history has been kept for
90 days.
"""
def list_requested_service_quota_change_history_by_quota(client, input, options \\ []) do
request(client, "ListRequestedServiceQuotaChangeHistoryByQuota", input, options)
end
@doc """
Returns a list of the quota increase requests in the template.
"""
def list_service_quota_increase_requests_in_template(client, input, options \\ []) do
request(client, "ListServiceQuotaIncreaseRequestsInTemplate", input, options)
end
@doc """
Lists all service quotas for the specified AWS service.
This request returns a list of the service quotas for the specified service.
For quotas that have not been increased, the values shown are the defaults that
AWS provides.
Always check the `NextToken` response parameter when calling any of the `List*`
operations. These operations can return an unexpected list of results, even when
there are more results available. When this happens, the `NextToken` response
parameter contains a value to pass the next call to the same API to request the
next part of the list.
"""
def list_service_quotas(client, input, options \\ []) do
request(client, "ListServiceQuotas", input, options)
end
@doc """
Lists the AWS services available in Service Quotas.
Not all AWS services are available in Service Quotas. To see the list
of the service quotas for a specific service, use `ListServiceQuotas`.
"""
def list_services(client, input, options \\ []) do
request(client, "ListServices", input, options)
end
@doc """
Defines and adds a quota to the service quota template.
To add a quota to the template, you must provide the `ServiceCode`, `QuotaCode`,
`AwsRegion`, and `DesiredValue`. Once you add a quota to the template, use
`ListServiceQuotaIncreaseRequestsInTemplate` to see the list of quotas in the
template.
"""
def put_service_quota_increase_request_into_template(client, input, options \\ []) do
request(client, "PutServiceQuotaIncreaseRequestIntoTemplate", input, options)
end
@doc """
Retrieves the details of a service quota increase request.
The response to this command provides the details in the
`RequestedServiceQuotaChange` object.
"""
def request_service_quota_increase(client, input, options \\ []) do
request(client, "RequestServiceQuotaIncrease", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "servicequotas"}
host = build_host("servicequotas", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "ServiceQuotasV20190624.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end | lib/aws/generated/service_quotas.ex | 0.859 | 0.631992 | service_quotas.ex | starcoder |
defmodule TradeIndicators.ATR do
use TypedStruct
alias __MODULE__, as: ATR
alias __MODULE__.Item
alias TradeIndicators.MA
alias TradeIndicators.Util, as: U
alias Decimal, as: D
alias List, as: L
alias Enum, as: E
alias Map, as: M
typedstruct do
field :list, List.t(), default: []
field :period, pos_integer(), default: 14
field :method, :ema | :wma, default: :ema
end
typedstruct module: Item do
field :avg, D.t() | nil, default: nil
field :tr, D.t() | nil, default: nil
field :t, non_neg_integer()
end
@zero D.new(0)
def step(chart = %ATR{list: atr_list, period: period, method: method}, bars)
when is_list(bars) and is_list(atr_list) and is_integer(period) and period > 1 do
ts = L.last(bars)[:t] || 0
case length(bars) do
0 ->
chart
n when n < period ->
new_atr = %{avg: nil, t: ts, tr: E.take(bars, -2) |> get_tr()}
%{chart | list: atr_list ++ [new_atr]}
_ ->
new_atr = E.take(bars, -2) |> get_tr() |> get_atr(atr_list, period, ts, method)
%{chart | list: atr_list ++ [new_atr]}
end
end
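# True range of a bar relative to the previous close `c`:
# TR = max(high - low, |high - prev_close|, |low - prev_close|)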
def get_tr([%{c: c, h: h, l: l}]), do: get_tr(c, h, l)
def get_tr([%{c: c}, %{h: h, l: l}]), do: get_tr(c, h, l)
def get_tr(c = %D{}, h = %D{}, l = %D{}) do
D.sub(h, l)
|> D.max(D.abs(D.sub(h, c)))
|> D.max(D.abs(D.sub(l, c)))
end
def make_tr_list(new_tr, atr_list, period) do
atr_list
|> E.take(-(period - 1))
|> E.map(fn %{tr: v} -> v || @zero end)
|> E.concat([new_tr])
end
def get_atr(new_tr, atr_list, period, ts, avg_fn) when avg_fn in [:wma, :ema] do
%Item{
avg: get_avg(atr_list, new_tr, period, avg_fn),
tr: new_tr,
t: ts
}
end
def get_avg(atr_list, new_tr, period, :wma) do
new_tr
|> make_tr_list(atr_list, period)
|> MA.wma(period)
end
def get_avg(atr_list, new_tr, period, :ema) do
if length(atr_list) == period - 1 do
atr_list
|> E.map(fn %{tr: tr} -> tr end)
|> E.concat([new_tr])
|> E.reduce(@zero, fn n, t -> D.add(t, U.dec(n)) end)
|> D.div(period)
else
atr_list
|> L.last()
|> M.get(:avg)
|> (fn last_tr -> {last_tr, new_tr} end).()
|> MA.ema(period)
end
end
end | lib/atr.ex | 0.547948 | 0.465934 | atr.ex | starcoder |
defmodule Crux.Structs.Snowflake.Parts do
@moduledoc """
Custom non discord api struct representing a deconstructed Discord snowflake.
## Structure of the Parts
| Field | Bits | Number of Bits | Description |
| :-----------------: | :------: | :------------: | :------------------------------------------------------------------------: |
| Timestamp | 63 to 22 | 42 bits | Milliseconds since Discord Epoch (1420070400000) |
| Internal Worker ID | 21 to 17 | 5 bits | |
| Internal Process ID | 16 to 12 | 5 bits | |
| Increment | 11 to 0 | 12 bits | For every ID that is generated on that process, this number is incremented |
For more information see [Discord Docs](https://discordapp.com/developers/docs/reference#snowflakes).
"""
use Bitwise
alias Crux.Structs.{Snowflake, Util}
require Util
Util.modulesince("0.2.1")
@discord_epoch 1_420_070_400_000
@doc false
@spec discord_epoch() :: non_neg_integer()
Util.since("0.2.1")
def discord_epoch(), do: @discord_epoch
# bits 63 to 22
@timestamp_bitmask 0xFFFF_FFFF_FFC0_0000
# bits 21 to 17
@worker_id_bitmask 0x3E_0000
# bits 16 to 12
@process_id_bitmask 0x1_F000
# bits 11 to 0
@increment_bitmask 0xFFF
@typedoc """
The parts of a `t:Crux.Structs.Snowflake.t/0`.
"""
Util.typesince("0.2.1")
@type t :: %Snowflake.Parts{
timestamp: non_neg_integer,
worker_id: non_neg_integer,
process_id: non_neg_integer,
increment: non_neg_integer
}
defstruct timestamp: @discord_epoch,
worker_id: 0,
process_id: 0,
increment: 0
@doc false
@spec deconstruct(Snowflake.t()) :: t
Util.since("0.2.1")
def deconstruct(snowflake) when is_integer(snowflake) and snowflake >= 0 do
%Snowflake.Parts{
timestamp: ((snowflake &&& @timestamp_bitmask) >>> 22) + @discord_epoch,
worker_id: (snowflake &&& @worker_id_bitmask) >>> 17,
process_id: (snowflake &&& @process_id_bitmask) >>> 12,
increment: (snowflake &&& @increment_bitmask) >>> 0
}
end
@doc false
@spec construct(t | Keyword.t()) :: Snowflake.t()
Util.since("0.2.1")
def construct(%Snowflake.Parts{
timestamp: timestamp,
worker_id: worker_id,
process_id: process_id,
increment: increment
})
when timestamp >= @discord_epoch and worker_id >= 0 and process_id >= 0 and increment >= 0 do
timestamp = timestamp - @discord_epoch
0
|> bor(timestamp <<< 22 &&& @timestamp_bitmask)
|> bor(worker_id <<< 17 &&& @worker_id_bitmask)
|> bor(process_id <<< 12 &&& @process_id_bitmask)
|> bor(increment <<< 0 &&& @increment_bitmask)
end
def construct(opts) when is_list(opts) do
Snowflake.Parts
|> struct(opts)
|> construct()
end
end
defmodule Crux.Structs.Snowflake do
@moduledoc """
Custom non discord api struct to help with working with Discord's snowflakes.
For more information see [Discord Docs](https://discordapp.com/developers/docs/reference#snowflakes).
"""
use Bitwise
alias Crux.Structs.{Snowflake, Util}
require Util
Util.modulesince("0.2.1")
@typedoc """
A discord `snowflake`, an unsigned 64 bit integer.
"""
Util.typesince("0.2.1")
@type t :: 0..0xFFFF_FFFF_FFFF_FFFF
@typedoc """
All valid types that can be resolved into a `t:t/0`.
"""
Util.typesince("0.2.1")
@type resolvable :: String.t() | t()
@doc """
Returns `true` if `term` is a `t:t/0`; otherwise returns `false`.
"""
Util.since("0.2.1")
defguard is_snowflake(snowflake)
when is_integer(snowflake) and snowflake in 0..0xFFFF_FFFF_FFFF_FFFF
@doc """
The discord epoch, the first second of 2015 or `1420070400000`.
```elixir
iex> Crux.Structs.Snowflake.discord_epoch()
1_420_070_400_000
```
"""
@spec discord_epoch() :: non_neg_integer()
Util.since("0.2.1")
defdelegate discord_epoch(), to: Crux.Structs.Snowflake.Parts
@doc """
Deconstructs a `t:t/0` to its `t:Crux.Structs.Snowflake.Parts.t/0`.
```elixir
iex> Crux.Structs.Snowflake.deconstruct(218348062828003328)
%Crux.Structs.Snowflake.Parts{
increment: 0,
process_id: 0,
timestamp: 1472128634889,
worker_id: 1
}
```
"""
@spec deconstruct(t) :: Snowflake.Parts.t()
Util.since("0.2.1")
defdelegate deconstruct(snowflake), to: Snowflake.Parts
@doc """
Constructs a `t:t/0` from its `t:Crux.Structs.Snowflake.Parts.t/0` or a keyword of its fields.
```elixir
iex> %Crux.Structs.Snowflake.Parts{increment: 0, process_id: 0, timestamp: 1472128634889, worker_id: 1}
...> |> Crux.Structs.Snowflake.construct()
218348062828003328
iex> Crux.Structs.Snowflake.construct(increment: 1, timestamp: 1451106635493)
130175406673231873
iex> Crux.Structs.Snowflake.construct(timestamp: Crux.Structs.Snowflake.discord_epoch())
0
```
"""
@spec construct(Snowflake.Parts.t() | Keyword.t()) :: t
Util.since("0.2.1")
defdelegate construct(parts), to: Snowflake.Parts
@doc """
Converts a `t:String.t/0` to a `t:t/0` while allowing `t:t/0` and `nil` to pass through.
Raises an `ArgumentError` if the provided string is not an integer.
```elixir
iex> Crux.Structs.Snowflake.to_snowflake(218348062828003328)
218348062828003328
# Fallbacks
iex> Crux.Structs.Snowflake.to_snowflake("218348062828003328")
218348062828003328
iex> Crux.Structs.Snowflake.to_snowflake(nil)
nil
```
"""
@spec to_snowflake(t()) :: t()
@spec to_snowflake(String.t()) :: t() | no_return()
@spec to_snowflake(nil) :: nil
Util.since("0.2.1")
def to_snowflake(nil), do: nil
def to_snowflake(snowflake) when is_snowflake(snowflake) do
snowflake
end
def to_snowflake(string) when is_binary(string) do
string
|> String.to_integer()
|> to_snowflake()
end
@doc """
Converts a `t:String.t/0` to a `t:t/0` while allowing `t:t/0` to pass through.
Returns `:error` if the provided string is not a `t:t/0`.
```elixir
iex> Crux.Structs.Snowflake.parse("invalid")
:error
iex> Crux.Structs.Snowflake.parse(218348062828003328)
218348062828003328
# Fallbacks
iex> Crux.Structs.Snowflake.parse("218348062828003328")
218348062828003328
```
"""
@spec parse(t()) :: t()
@spec parse(String.t()) :: t() | :error
Util.since("0.2.1")
def parse(snowflake) when is_snowflake(snowflake) do
snowflake
end
def parse(string) when is_binary(string) do
case Integer.parse(string) do
{snowflake, ""} when is_snowflake(snowflake) ->
snowflake
_ ->
:error
end
end
# delegates
@doc """
Deconstructs a `t:t/0` to its `t:Crux.Structs.Snowflake.Parts.t/0`.
"""
@spec from_integer(t) :: Snowflake.Parts.t()
Util.since("0.2.1")
defdelegate from_integer(snowflake), to: Snowflake.Parts, as: :deconstruct
@doc """
Constructs a `t:t/0` from its `t:Crux.Structs.Snowflake.Parts.t/0`.
"""
@spec to_integer(Snowflake.Parts.t()) :: t()
Util.since("0.2.1")
defdelegate to_integer(t), to: Snowflake.Parts, as: :construct
end
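# Usage sketch (annotation, not part of the library): the deconstructed
# timestamp is a Unix timestamp in milliseconds, so a snowflake's creation
# time can be recovered with the standard library:
#
#   parts = Crux.Structs.Snowflake.deconstruct(218_348_062_828_003_328)
#   {:ok, created_at} = DateTime.from_unix(parts.timestamp, :millisecond)
#   #=> created_at == ~U[2016-08-25 12:37:14.889Z]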
defmodule Goldie.Utils do
@moduledoc """
Misc utils
"""
@doc """
min of two numbers
"""
@spec min(number, number) :: number()
def min(x, y) when x < y, do: x
def min(_x, y), do: y
@doc """
max of two numbers
"""
@spec max(number, number) :: number()
def max(x, y) when x > y, do: x
def max(_x, y), do: y
@doc """
Generate an id
"""
@spec id(atom) :: binary
def id(prefix) when is_atom(prefix) do
id(:erlang.atom_to_binary(prefix, :utf8))
end
@spec id(binary) :: binary
def id(prefix) do
id = id()
<<prefix :: binary, ":", id :: binary>>
end
@spec id() :: binary
def id() do
stamp = :erlang.phash2({:erlang.node(), :erlang.monotonic_time()})
:base64.encode(<<stamp :: size(32)>>)
end
@doc """
Convert map string keys to atoms
"""
def atomify_map_keys(input) when is_map(input) do
Enum.reduce(input, %{}, fn({key, value}, acc) ->
      Map.put(acc, String.to_atom(key), atomify_map_keys(value))
end)
end
def atomify_map_keys(input), do: input
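  # Example (annotation): keys are atomified recursively.
  #   atomify_map_keys(%{"a" => %{"b" => 1}}) #=> %{a: %{b: 1}}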
@doc """
Convert atom keys and values to binary
"""
def binarify_map(input) when is_map(input) do
Enum.reduce(input, %{}, fn({key, value}, acc) ->
      Map.put(acc, binarify_map(key), binarify_map(value))
end)
end
def binarify_map(input) when is_list(input) do
for elem <- input, do: binarify_map(elem)
end
  def binarify_map(input) when is_atom(input) and not is_boolean(input) do
:erlang.atom_to_binary(input, :utf8)
end
def binarify_map(input), do: input
@spec timestamp_ms() :: integer
def timestamp_ms() do
    # :os.timestamp/0 returns {megaseconds, seconds, microseconds}
    {mega, seconds, micro} = :os.timestamp()
    (mega * 1_000_000 + seconds) * 1000 + :erlang.round(micro / 1000)
end
@doc """
  Milliseconds elapsed since the given millisecond timestamp.
"""
@spec ms_since_timestamp_ms(integer) :: integer
def ms_since_timestamp_ms(ts) do
timestamp_ms() - ts
end
@doc """
Current time in seconds since 1.1.1970 in universal time
"""
@spec timestamp_epoch() :: integer
def timestamp_epoch() do
seconds = :calendar.datetime_to_gregorian_seconds(:calendar.universal_time())
    unix_start_epoch = {{1970, 1, 1}, {0, 0, 0}}
    unix_start_epoch_sec = :calendar.datetime_to_gregorian_seconds(unix_start_epoch)
seconds - unix_start_epoch_sec
end
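  # Annotation: on modern Elixir/OTP this is equivalent to
  # `DateTime.to_unix(DateTime.utc_now())` or `System.os_time(:second)`.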
@doc """
Entity contact information
"""
@spec entity_contact(struct) :: struct
def entity_contact(entity) do
Map.take(entity, [
:id,
:pid,
:loc,
:asset
])
end
@doc """
  Merge two lists of entities into a union keyed by `:id`; on duplicate
  ids the entry from the second list wins (see the example below the function).
"""
@spec entity_union(list, list) :: list
def entity_union(list1, list2) do
Enum.reduce(list1, list2, fn(entity, acc_in) ->
case select_matches(acc_in, %{id: entity.id}) do
[] ->
[entity|acc_in]
_ ->
acc_in
end
end)
end
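  # Example (annotation, assumed entity shape — maps with an :id key):
  #   entity_union([%{id: 1, hp: 10}], [%{id: 1, hp: 99}, %{id: 2}])
  #   #=> [%{id: 1, hp: 99}, %{id: 2}]   # entries from list2 win on conflict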
@doc """
  Select maps from a list of maps that match the given key/value pairs.
  Example: select_matches([%{id: 1, other: "data1"}, %{id: 2, other: "data2"}], %{id: 1}) -> [%{id: 1, other: "data1"}]
"""
@spec select_matches([map], map) :: [map]
def select_matches(map_list, match_map) do
select_or_delete_matches(map_list, match_map, :select)
end
@doc """
  Delete maps from a list of maps that match the given key/value pairs.
"""
@spec delete_matches([map], map) :: [map]
def delete_matches(map_list, match_map) do
select_or_delete_matches(map_list, match_map, :delete)
end
## Internal functions
  ## Select or delete maps from a list, depending on `operation`.
@spec select_or_delete_matches([map], map, :select | :delete) :: [map]
defp select_or_delete_matches(map_list, match_map, operation) do
Enum.filter(map_list, fn(map) ->
case match(map, match_map) do
true ->
operation == :select
_ ->
operation != :select
end
end)
end
  ## Test whether a map matches another map; nested maps are matched recursively.
@spec match(map, map) :: boolean
defp match(map, match_map) do
Enum.reduce(Map.to_list(match_map), true, fn({match_key, match_val}, acc_in) ->
case acc_in do
        false -> ## Already found a mismatch
false
_ ->
case Map.get(map, match_key) do
nil ->
              false
map_val when is_map(map_val) ->
match(map_val, match_val)
^match_val ->
true
_ ->
false
end
end
end)
end
end
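# Example (annotation): matching recurses into nested maps, so a partial
# nested pattern selects entries whose nested values agree:
#
#   Goldie.Utils.select_matches(
#     [%{id: 1, loc: %{x: 0, y: 1}}, %{id: 2, loc: %{x: 5, y: 1}}],
#     %{loc: %{x: 0}}
#   )
#   #=> [%{id: 1, loc: %{x: 0, y: 1}}]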