defmodule CIDR do
import Bitwise
@moduledoc """
Classless Inter-Domain Routing (CIDR)
"""
defstruct first: nil, last: nil, mask: nil, hosts: nil
defimpl String.Chars, for: CIDR do
@doc """
Prints CIDR structs in a human-readable format
IPv4: 1.1.1.0/24
IPv6: 2001::/64
"""
def to_string(cidr), do: "#{:inet.ntoa(cidr.first)}/#{cidr.mask}"
end
@doc """
Check whether the argument is a CIDR value.
## Examples
iex> CIDR.is_cidr?("192.168.1.254/32")
true
"""
def is_cidr?(cidr) when is_map(cidr) do
cidr.__struct__ == CIDR
end
def is_cidr?(string) when is_bitstring(string) do
string
|> parse
|> is_cidr?
end
def is_cidr?(_), do: false
@doc """
Checks if an IP address is in the provided CIDR.
Returns `{:ok, true}` if the address is in the CIDR range, `{:ok, false}` if
it's not, and `{:error, reason}` if the second argument isn't a valid IP
address.
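## Examples
For example, with a parsed IPv4 range:
iex> CIDR.parse("10.0.0.0/24") |> CIDR.match({10, 0, 0, 5})
{:ok, true}
iex> CIDR.parse("10.0.0.0/24") |> CIDR.match({10, 0, 1, 5})
{:ok, false}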
"""
def match(cidr, address) when is_binary(address) do
case parse_address(address) do
{:ok, ip} -> match(cidr, ip)
{:error, reason} -> {:error, reason}
end
end
def match(%CIDR{first: {a, b, c, d}, last: {e, f, g, h}}, address = {i, j, k, l}) do
if is_ipv4(address) do
result =
i in a..e and
j in b..f and
k in c..g and
l in d..h
{:ok, result}
else
{:error, "Tuple is not a valid IP address"}
end
end
def match(%CIDR{first: {a, b, c, d, e, f, g, h}, last: {i, j, k, l, m, n, o, p}},
address = {q, r, s, t, u, v, w, x}) do
if is_ipv6(address) do
result =
q in a..i and
r in b..j and
s in c..k and
t in d..l and
u in e..m and
v in f..n and
w in g..o and
x in h..p
{:ok, result}
else
{:error, "Tuple is not a valid IP address"}
end
end
def match(_cidr, _address),
do: {:error, "Argument must be a binary or IP tuple of the same protocol"}
@doc """
Throwing version of match/2, raises `ArgumentError` on error.
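## Examples
iex> CIDR.parse("10.0.0.0/24") |> CIDR.match!({10, 0, 0, 5})
true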
"""
def match!(cidr, address) do
case match(cidr, address) do
{:ok, result} -> result
{:error, reason} -> raise ArgumentError, message: reason
end
end
@doc """
Returns a stream of all hosts in the range
## Examples
iex> CIDR.parse("192.168.0.0/31") |> CIDR.hosts |> Enum.map(fn(x) -> x end)
[{192, 168, 0, 0}, {192, 168, 0, 1}]
"""
def hosts(%CIDR{first: {_, _, _, _}} = cidr) do
t = tuple2number(cidr.first, (32 - cidr.mask))
Stream.map(0..(cidr.hosts - 1), fn(x) -> number2tuple(t + x, :ipv4) end)
end
def hosts(%CIDR{first: {_, _, _, _, _, _, _, _}} = cidr) do
t = tuple2number(cidr.first, (128 - cidr.mask))
Stream.map(0..(cidr.hosts - 1), fn(x) -> number2tuple(t + x, :ipv6) end)
end
@doc """
Checks if two cidr objects are equal
### Examples
iex> d = CIDR.parse("10.0.0.0/24")
%CIDR{first: {10, 0, 0, 0}, hosts: 256, last: {10, 0, 0, 255}, mask: 24}
iex> c = CIDR.parse("10.0.0.0/24")
%CIDR{first: {10, 0, 0, 0}, hosts: 256, last: {10, 0, 0, 255}, mask: 24}
iex> CIDR.equal?(d, c)
true
"""
def equal?(a, b) do
a.first == b.first and
a.last == b.last
end
@doc """
Checks if a is a subnet of b
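### Examples
For example, a /24 is contained in the enclosing /16:
iex> CIDR.subnet?(CIDR.parse("10.0.0.0/24"), CIDR.parse("10.0.0.0/16"))
true
iex> CIDR.subnet?(CIDR.parse("10.0.0.0/16"), CIDR.parse("10.0.0.0/24"))
false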
"""
def subnet?(%CIDR{mask: mask_a}, %CIDR{mask: mask_b}) when mask_a < mask_b do
false
end
def subnet?(a, b) do
(tuple2number(a.first, 0) >= tuple2number(b.first, 0)) and
(tuple2number(a.last, 0) <= tuple2number(b.last, 0))
end
@doc """
Checks if a is a supernet of b
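### Examples
Conversely, the /16 contains the /24:
iex> CIDR.supernet?(CIDR.parse("10.0.0.0/16"), CIDR.parse("10.0.0.0/24"))
true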
"""
def supernet?(%CIDR{mask: mask_a}, %CIDR{mask: mask_b}) when mask_a > mask_b do
false
end
def supernet?(a, b) do
tuple2number(a.first, 0) <= tuple2number(b.first, 0) and
tuple2number(a.last, 0) >= tuple2number(b.last, 0)
end
@doc """
Splits an existing cidr into smaller blocks
### Examples
iex> CIDR.parse("192.168.0.0/24") |> CIDR.split(25) |> Enum.map(&(&1))
[%CIDR{first: {192, 168, 0, 0}, hosts: 128, last: {192, 168, 0, 127}, mask: 25},
%CIDR{first: {192, 168, 0, 128}, hosts: 128, last: {192, 168, 0, 255}, mask: 25}]
"""
def split(%CIDR{mask: mask}, new_mask) when mask > new_mask do
{:error, "New mask must be larger than the mask of the existing CIDR"}
end
def split(%CIDR{first: {_, _, _, _}}=cidr, new_mask) do
x = tuple2number(cidr.first, 32 - cidr.mask)
split(x, new_mask, cidr.mask, :ipv4)
end
def split(%CIDR{first: {_, _, _, _, _, _, _, _}}=cidr, new_mask) do
x = tuple2number(cidr.first, 128 - cidr.mask)
split(x, new_mask, cidr.mask, :ipv6)
end
defp split(start, new_mask, old_mask, afi) do
n = round(:math.pow(2, new_mask - old_mask) - 1)
step = num_hosts(afi, new_mask)
Stream.map(0..n, fn(x) ->
offset = start + (step) * x
first = number2tuple(offset, afi)
last = number2tuple((offset + (step - 1)), afi)
%CIDR{first: first, last: last, mask: new_mask, hosts: step}
end)
end
@doc """
Parses a bitstring into a CIDR struct
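## Examples
iex> CIDR.parse("192.168.1.0/24")
%CIDR{first: {192, 168, 1, 0}, hosts: 256, last: {192, 168, 1, 255}, mask: 24}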
"""
def parse(string) when is_bitstring(string) do
[address | mask] = string |> String.split("/")
case parse_address(address) do
{:ok, address} -> parse(address, mask)
{:error, reason} -> {:error, reason}
end
end
# Only bitstrings can be parsed
def parse(_other) do
{:error, "Not a bitstring"}
end
# We got a simple IP address without mask
defp parse(address, []) when tuple_size(address) == 4 do
create(address, address, 32, num_hosts(:ipv4, 32))
end
defp parse(address, []) when tuple_size(address) == 8 do
create(address, address, 128, num_hosts(:ipv6, 128))
end
# We got a mask and need to convert it to integer
defp parse(address, [mask]) do
parse(address, mask |> int)
end
# Validate that mask is valid
defp parse(address, mask) when tuple_size(address) == 4 and mask not in 0..32 do
{:error, "Invalid mask #{mask}"}
end
defp parse(address, mask) when tuple_size(address) == 8 and mask not in 0..128 do
{:error, "Invalid mask #{mask}"}
end
# Everything is fine
defp parse(address, mask) when tuple_size(address) == 4 do
parse(address, mask, :ipv4)
end
defp parse(address, mask) when tuple_size(address) == 8 do
parse(address, mask, :ipv6)
end
defp parse(address, mask, version) do
first = range_address(version, address, mask, false)
last = range_address(version, address, mask, true)
create(first, last, mask, num_hosts(version, mask))
end
defp parse_address(address) do
address |> String.to_charlist() |> :inet.parse_address()
end
defp create(first, last, mask, hosts) do
%CIDR{first: first, last: last, mask: mask, hosts: hosts}
end
defp num_hosts(:ipv4, mask), do: 1 <<< (32 - mask)
defp num_hosts(:ipv6, mask), do: 1 <<< (128 - mask)
defp range_address(:ipv4, tuple, mask, is_last) do
s = (32 - mask)
x = tuple2number(tuple, s)
x = if is_last, do: x ||| ((1 <<< s) - 1), else: x
x |> number2tuple(:ipv4)
end
defp range_address(:ipv6, tuple, mask, is_last) do
s = (128 - mask)
x = tuple2number(tuple, s)
x = if is_last, do: x ||| ((1 <<< s) - 1), else: x
x |> number2tuple(:ipv6)
end
def number2tuple(n, afi) do
case afi do
:ipv6 -> number2list(n, 0, 16, 8, 0xFFFF) |> List.to_tuple
:ipv4 -> number2list(n, 0, 8, 4, 0xFF) |> List.to_tuple
end
end
def number2list(_, _, _, 0, _), do: []
def number2list(x, s, d, i, m) do
number2list(x, s + d, d, i - 1, m) ++ [(x >>> s) &&& m]
end
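# Packs an IP tuple into a single integer and zeroes its lowest `s` (host)
# bits, e.g. tuple2number({192, 168, 0, 1}, 8) == 0xC0A80000.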
def tuple2number({a, b, c, d}, s) do
(((a <<< 24) ||| (b <<< 16) ||| (c <<< 8) ||| d) >>> s) <<< s
end
def tuple2number({a, b, c, d, e, f, g, h}, s) do
(((a <<< 112) ||| (b <<< 96) ||| (c <<< 80) ||| (d <<< 64)
||| (e <<< 48) ||| (f <<< 32) ||| (g <<< 16) ||| h) >>> s) <<< s
end
defp is_ipv4({_, _, _, _} = tuple), do: is_ipvx(tuple, 0..255)
defp is_ipv4(_), do: false
defp is_ipv6({_, _, _, _, _, _, _, _} = tuple), do: is_ipvx(tuple, 0..65_535)
defp is_ipv6(_), do: false
defp is_ipvx(tuple, range) do
tuple
|> Tuple.to_list
|> Enum.all?(&(&1 in range))
end
defp int(x) do
case x |> Integer.parse do
:error -> -1
{a, _} -> a
end
end
end

# Source: deps/cidr/lib/cidr.ex
defmodule Day24 do
@moduledoc """
Documentation for Day24.
"""
def part1 do
initial_black_tiles("input.txt")
|> Enum.to_list()
|> Enum.count()
|> IO.puts()
end
def part2 do
"input.txt" |> initial_black_tiles() |> days(100) |> Enum.count() |> IO.puts()
end
def read_input(filename) do
File.stream!(filename)
|> Stream.map(&String.trim/1)
|> Stream.map(fn line ->
Regex.scan(~r/e|se|sw|w|nw|ne/, line)
|> Enum.map(&List.first/1)
|> Enum.map(&String.to_existing_atom/1)
end)
end
def initial_black_tiles(filename) do
read_input(filename)
|> Stream.map(&walk/1)
|> Stream.map(&to_2d/1)
|> Enum.sort()
|> Stream.chunk_by(& &1)
|> Stream.filter(fn visits -> is_odd(Enum.count(visits)) end)
|> Stream.map(&List.first/1)
end
def is_odd(num) do
rem(num, 2) != 0
end
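# Walks a direction list over the hex grid using three signed axes
# (east/west, northeast/southwest, northwest/southeast) from the origin.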
def walk(directions) do
directions
|> Enum.reduce({0, 0, 0}, fn move, {e_w, ne_sw, nw_se} ->
case move do
:e -> {e_w + 1, ne_sw, nw_se}
:w -> {e_w - 1, ne_sw, nw_se}
:se -> {e_w, ne_sw, nw_se - 1}
:nw -> {e_w, ne_sw, nw_se + 1}
:sw -> {e_w, ne_sw - 1, nw_se}
:ne -> {e_w, ne_sw + 1, nw_se}
end
end)
end
def to_2d({e_w, ne_sw, nw_se}) do
{e_w * 2 + ne_sw - nw_se, ne_sw + nw_se}
end
def space_ranges(black_tiles) do
x_axis = black_tiles |> Stream.map(fn {x, _y} -> x end)
y_axis = black_tiles |> Stream.map(fn {_x, y} -> y end)
{(Enum.min(x_axis) - 2)..(Enum.max(x_axis) + 2),
(Enum.min(y_axis) - 1)..(Enum.max(y_axis) + 1)}
end
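# Tiles live in "doubled" 2D coordinates (see to_2d/1): x and y always share
# parity, so east/west neighbors sit at x ± 2 and diagonals at x ± 1, y ± 1.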
def neighborhood({x, y}) do
[{x - 1, y + 1}, {x + 1, y + 1}, {x - 1, y - 1}, {x - 2, y}, {x + 1, y - 1}, {x + 2, y}]
end
def odd_or_even(x, y) do
if is_odd(y) do
is_odd(x)
else
!is_odd(x)
end
end
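# One generation of the cellular automaton: a black tile stays black with one
# or two black neighbors; a white tile becomes black with exactly two.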
def day(black_tiles) do
{x_range, y_range} = space_ranges(black_tiles)
for y <- y_range do
for x <- x_range, odd_or_even(x, y) do
neighbors =
neighborhood({x, y})
|> Enum.count(fn neighbor -> Enum.member?(black_tiles, neighbor) end)
if Enum.member?(black_tiles, {x, y}) do
# was black
{x, y, if(neighbors == 1 || neighbors == 2, do: :black, else: :white)}
else
# was white
{x, y, if(neighbors == 2, do: :black, else: :white)}
end
end
end
|> List.flatten()
|> Enum.filter(fn {_x, _y, color} -> color == :black end)
|> Enum.map(fn {x, y, _color} -> {x, y} end)
end
def days(tiles, days) do
1..days |> Enum.reduce(tiles, fn _day, tiles -> day(tiles) end)
end
end

# Source: day24/lib/day24.ex
defmodule ExAws.ElasticLoadBalancing do
@moduledoc """
Operations on AWS ElasticLoadBalancing
AWS ElasticLoadBalancing provides a reliable, scalable, and flexible monitoring solution
for your AWS resources. This module provides functionality for only classic load balancers.
See `ExAws.ElasticLoadBalancingV2` for the API for application and network load balancers.
More information:
* [Elastic Load Balancing User Guide][User_Guide]
* [Elastic Load Balancing API][API_Doc]
* [Amazon Resource Names (ARNs)][ARN_Doc]
[User_Guide]: http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/
[API_Doc]: http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/
[ARN_Doc]: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
"""
use ExAws.Utils,
format_type: :xml,
non_standard_keys: %{ssl_certificate_id: "SSLCertificateId"}
# version of the AWS API
@version "2012-06-01"
@type tag :: %{key: binary, value: binary}
@type tag_key_only :: %{key: binary}
@type describe_account_limits_opts :: [
marker: binary,
# Minimum value of 1. Maximum value of 400
page_size: integer
]
@type instance :: %{instance_id: binary}
@type describe_instance_health_opts :: [
instances: [instance, ...]
]
@type describe_load_balancers_opts :: [
load_balancer_names: [binary, ...],
starting_token: binary,
max_items: integer,
page_size: integer
]
@type describe_load_balancer_policies_opts :: [
load_balancer_name: binary,
policy_names: [binary, ...]
]
@type describe_load_balancer_policy_types_opts :: [
policy_type_names: [binary, ...]
]
@type load_balancer_attribute :: [
connection_settings_idle_timeout: binary,
cross_zone_load_balancing_enabled: binary,
connection_draining_enabled: boolean,
connection_draining_timeout: integer,
access_log_emit_interval: integer,
access_log_enabled: boolean,
access_log_s3_bucket_prefix: binary,
access_log_s3_bucket_name: binary
]
@type health_check :: %{
healthy_threshold: integer,
interval: integer,
target: binary,
timeout: integer,
unhealthy_threshold: integer
}
@type listener :: %{
instance_port: integer,
instance_protocol: binary,
load_balancer_port: integer,
protocol: binary,
ssl_certificate_id: binary
}
@type policy_attribute :: %{
attribute_name: binary,
attribute_value: binary
}
@type create_load_balancer_policy_opts :: [
policy_attributes: [policy_attribute, ...]
]
@type create_load_balancer_opts :: [
availability_zones: [binary, ...],
scheme: binary,
security_groups: [binary, ...],
subnets: [binary, ...],
tags: [tag, ...]
]
@doc """
Adds the specified tags to the specified load balancer. Each load balancer can have a
maximum of 10 tags
Each tag consists of a key and an optional value. If a tag with the same key is already
associated with the load balancer, `add_tags/2` updates its value.
For more information, see [Tag Your Classic Load Balancer in the Classic Load Balancers
Guide](https://amzn.to/2Ou2dCS).
## Parameters:
* load_balancer_names (`List` of `String`) - The name of the load balancer. You can specify
one load balancer only
* tags (`List` of `t:tag/0`) - the tags to apply to the specified balancer
## Examples:
iex> ExAws.ElasticLoadBalancing.add_tags(["classic1"], [%{key: "Hello", value: "test"}])
%ExAws.Operation.Query{
action: :add_tags,
params: %{
"Action" => "AddTags",
"LoadBalancerNames.member.1" => "classic1",
"Tags.member.1.Key" => "Hello",
"Tags.member.1.Value" => "test",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec add_tags(load_balancer_names :: [binary, ...], tags :: [tag, ...]) ::
ExAws.Operation.Query.t()
def add_tags(load_balancer_names, tags) do
[{:load_balancer_names, load_balancer_names}, {:tags, tags}]
|> build_request(:add_tags)
end
@doc """
Associates one or more security groups with your load balancer in a virtual private cloud (VPC)
The specified security groups override the previously associated security groups.
For more information, see [Security Groups for Load Balancers in a VPC](https://amzn.to/2JDqK9A)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* security_groups (`List` of `String`) - The IDs of the security groups to associate with the
load balancer. Note that you cannot specify the name of the security group
## Examples:
iex> ExAws.ElasticLoadBalancing.apply_security_groups_to_load_balancer("mylb", ["sg1", "sg2"])
%ExAws.Operation.Query{
action: :apply_security_groups_to_load_balancer,
params: %{
"Action" => "ApplySecurityGroupsToLoadBalancer",
"LoadBalancerName" => "mylb",
"SecurityGroups.member.1" => "sg1",
"SecurityGroups.member.2" => "sg2",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec apply_security_groups_to_load_balancer(
load_balancer_name :: binary,
security_groups :: [binary, ...]
) :: ExAws.Operation.Query.t()
def apply_security_groups_to_load_balancer(load_balancer_name, security_groups) do
[{:load_balancer_name, load_balancer_name}, {:security_groups, security_groups}]
|> build_request(:apply_security_groups_to_load_balancer)
end
@doc """
Adds one or more subnets to the set of configured subnets for the specified
load balancer
The load balancer evenly distributes requests across all registered subnets. For
more information, see [Add or Remove Subnets for Your Load Balancer in a
VPC](https://amzn.to/2YqKJvD) in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* subnets (`List` of `String`) - the IDs of the subnets to add. You can add only
one subnet per Availability Zone.
## Examples:
iex> ExAws.ElasticLoadBalancing.attach_load_balancer_to_subnets("mylb", ["subnet-3561b05e"])
%ExAws.Operation.Query{
action: :attach_load_balancer_to_subnets,
params: %{
"Action" => "AttachLoadBalancerToSubnets",
"LoadBalancerName" => "mylb",
"Subnets.member.1" => "subnet-3561b05e",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
def attach_load_balancer_to_subnets(load_balancer_name, subnets) do
[{:load_balancer_name, load_balancer_name}, {:subnets, subnets}]
|> build_request(:attach_load_balancer_to_subnets)
end
@doc """
Specifies the health check settings to use when evaluating the health state of your EC2 instances
For more information, see [Configure Health Checks for Your Load Balancer](https://amzn.to/2HWBv4z)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* health_check (`t:health_check/0`) - configuration information
## Examples:
iex> ExAws.ElasticLoadBalancing.configure_health_check("mylb",
...> %{healthy_threshold: 2,
...> unhealthy_threshold: 2,
...> target: "HTTP:80/ping",
...> interval: 30,
...> timeout: 3})
%ExAws.Operation.Query{
action: :configure_health_check,
params: %{
"Action" => "ConfigureHealthCheck",
"LoadBalancerName" => "mylb",
"HealthCheck.HealthyThreshold" => 2,
"HealthCheck.UnhealthyThreshold" => 2,
"HealthCheck.Target" => "HTTP:80/ping",
"HealthCheck.Interval" => 30,
"HealthCheck.Timeout" => 3,
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec configure_health_check(load_balancer_name :: binary, health_check :: health_check) ::
ExAws.Operation.Query.t()
def configure_health_check(load_balancer_name, health_check) do
[{:load_balancer_name, load_balancer_name}, {:health_check, health_check}]
|> build_request(:configure_health_check)
end
@doc """
Generates a stickiness policy with sticky session lifetimes that follow
that of an application-generated cookie
This policy can be associated only with HTTP/HTTPS listeners.
This policy is similar to the policy created by `create_lb_cookie_stickiness_policy/3`, except
that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime
of the application-generated cookie specified in the policy configuration. The load balancer
only inserts a new stickiness cookie when the application response includes a new application
cookie.
If the application cookie is explicitly removed or expires, the session stops being sticky
until a new application cookie is issued.
For more information, see [Application-Controlled Session Stickiness](https://amzn.to/2HFsyNz)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* policy_name (`String`) - The name of the policy being created. Policy names must consist
of alphanumeric characters and dashes (-). This name must be unique within the set of policies
for this load balancer
* cookie_name (`String`) - The name of the application cookie used for stickiness
## Examples:
iex> ExAws.ElasticLoadBalancing.create_app_cookie_stickiness_policy("mylb", "my-app-sticky-policy", "my-cookie")
%ExAws.Operation.Query{
action: :create_app_cookie_stickiness_policy,
params: %{
"Action" => "CreateAppCookieStickinessPolicy",
"LoadBalancerName" => "mylb",
"PolicyName" => "my-app-sticky-policy",
"CookieName" => "my-cookie",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec create_app_cookie_stickiness_policy(
load_balancer_name :: binary,
policy_name :: binary,
cookie_name :: binary
) :: ExAws.Operation.Query.t()
def create_app_cookie_stickiness_policy(load_balancer_name, policy_name, cookie_name) do
[
{:load_balancer_name, load_balancer_name},
{:policy_name, policy_name},
{:cookie_name, cookie_name}
]
|> build_request(:create_app_cookie_stickiness_policy)
end
@doc """
Generates a stickiness policy with sticky session lifetimes controlled
by the lifetime of the browser (user-agent) or a specified expiration period
This policy can be associated only with HTTP/HTTPS listeners.
When a load balancer implements this policy, the load balancer uses a special
cookie to track the instance for each request. When the load balancer receives
a request, it first checks to see if this cookie is present in the request. If
so, the load balancer sends the request to the application server specified in
the cookie. If not, the load balancer sends the request to a server that is
chosen based on the existing load-balancing algorithm.
A cookie is inserted into the response for binding subsequent requests from the
same user to that server. The validity of the cookie is based on the cookie
expiration time, which is specified in the policy configuration.
For more information, see [Duration-Based Session Stickiness](https://amzn.to/2CzNbX9)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* policy_name (`String`) - The name of the policy being created. Policy names must consist
of alphanumeric characters and dashes (-). This name must be unique within the set of policies
for this load balancer
* cookie_expiration_period (`Integer`) - The time period, in seconds, after which the cookie
should be considered stale. If you do not specify this parameter, the default value is 0,
which indicates that the sticky session should last for the duration of the browser session
## Examples:
iex> ExAws.ElasticLoadBalancing.create_lb_cookie_stickiness_policy("mylb", "my-app-sticky-policy", 60)
%ExAws.Operation.Query{
action: :create_lb_cookie_stickiness_policy,
params: %{
"Action" => "CreateLBCookieStickinessPolicy",
"LoadBalancerName" => "mylb",
"PolicyName" => "my-app-sticky-policy",
"CookieExpirationPeriod" => 60,
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec create_lb_cookie_stickiness_policy(
load_balancer_name :: binary,
policy_name :: binary,
cookie_expiration_period :: integer
) :: ExAws.Operation.Query.t()
def create_lb_cookie_stickiness_policy(
load_balancer_name,
policy_name,
cookie_expiration_period
) do
[
{:load_balancer_name, load_balancer_name},
{:policy_name, policy_name},
{:cookie_expiration_period, cookie_expiration_period}
]
|> build_request(:create_lb_cookie_stickiness_policy)
end
@doc """
Creates a Classic Load Balancer.
You can add listeners, security groups, subnets, and tags when you create your
load balancer, or you can add them later using `create_load_balancer_listeners/2`,
`apply_security_groups_to_load_balancer/2`, `attach_load_balancer_to_subnets/2`, and
`add_tags/2`.
To describe your current load balancers, see `describe_load_balancers/1`. When you
are finished with a load balancer, you can delete it using `delete_load_balancer/1`.
You can create up to 20 load balancers per region per account. You can request an
increase for the number of load balancers for your account. For more information,
see [Limits for Your Classic Load Balancer](https://amzn.to/2uxqzSW) in the
Classic Load Balancers Guide.
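## Examples
For example, to create a load balancer with one HTTP listener in a single
Availability Zone:
iex> ExAws.ElasticLoadBalancing.create_load_balancer("mylb",
...> [%{protocol: "http", load_balancer_port: 80, instance_port: 80, instance_protocol: "http"}],
...> availability_zones: ["us-east-1a"])
%ExAws.Operation.Query{
  action: :create_load_balancer,
  params: %{
    "Action" => "CreateLoadBalancer",
    "AvailabilityZones.member.1" => "us-east-1a",
    "Listeners.member.1.InstancePort" => 80,
    "Listeners.member.1.InstanceProtocol" => "http",
    "Listeners.member.1.LoadBalancerPort" => 80,
    "Listeners.member.1.Protocol" => "http",
    "LoadBalancerName" => "mylb",
    "Version" => "2012-06-01"
  },
  parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
  path: "/",
  service: :elasticloadbalancing
}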
"""
@spec create_load_balancer(load_balancer_name :: binary, listeners :: [listener, ...]) ::
ExAws.Operation.Query.t()
@spec create_load_balancer(
load_balancer_name :: binary,
listeners :: [listener, ...],
opts :: create_load_balancer_opts
) :: ExAws.Operation.Query.t()
def create_load_balancer(load_balancer_name, listeners, opts \\ []) do
[
{:load_balancer_name, load_balancer_name},
{:listeners, listeners} | opts
]
|> build_request(:create_load_balancer)
end
@doc """
Creates one or more listeners for the specified load balancer
If a listener with the specified port does not already exist, it is
created; otherwise, the properties of the new listener must match the
properties of the existing listener.
For more information, see [Listeners for Your Classic Load Balancer](https://amzn.to/2CIUMCS)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the load balancer name
* listeners (`List` of `t:listener/0`) - the listeners
## Examples:
iex> ExAws.ElasticLoadBalancing.create_load_balancer_listeners("mylb",
...> [%{protocol: "https", load_balancer_port: 443, instance_port: 443, instance_protocol: "https",
...> ssl_certificate_id: "arn:aws:iam::123456789012"}])
%ExAws.Operation.Query{
action: :create_load_balancer_listeners,
params: %{
"Action" => "CreateLoadBalancerListeners",
"LoadBalancerName" => "mylb",
"Listeners.member.1.Protocol" => "https",
"Listeners.member.1.LoadBalancerPort" => 443,
"Listeners.member.1.InstancePort" => 443,
"Listeners.member.1.InstanceProtocol" => "https",
"Listeners.member.1.SSLCertificateId" => "arn:aws:iam::123456789012",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec create_load_balancer_listeners(load_balancer_name :: binary, listeners :: [listener, ...]) ::
ExAws.Operation.Query.t()
def create_load_balancer_listeners(load_balancer_name, listeners) do
[
{:load_balancer_name, load_balancer_name},
{:listeners, listeners}
]
|> build_request(:create_load_balancer_listeners)
end
@doc """
Creates a policy with the specified attributes for the specified load balancer
Policies are settings that are saved for your load balancer and that can be applied
to the listener or the application server, depending on the policy type.
## Parameters:
* load_balancer_name (`String`) - the load balancer name
* policy_name (`String`) - the name of the load balancer policy to be created. This
name must be unique within the set of policies for this load balancer
* policy_type_name (`String`) - the name of the base policy type. To get the list of
policy types, use `describe_load_balancer_policy_types/1`
* opts (`t:create_load_balancer_policy_opts/0`) - optional policy attributes
## Examples:
iex> ExAws.ElasticLoadBalancing.create_load_balancer_policy("mylb",
...> "EnableProxyProtocol", "ProxyProtocolPolicyType", policy_attributes: [%{attribute_name: "ProxyProtocol", attribute_value: true}])
%ExAws.Operation.Query{
action: :create_load_balancer_policy,
params: %{
"Action" => "CreateLoadBalancerPolicy",
"LoadBalancerName" => "mylb",
"PolicyAttributes.member.1.AttributeName" => "ProxyProtocol",
"PolicyAttributes.member.1.AttributeValue" => true,
"PolicyName" => "EnableProxyProtocol",
"PolicyTypeName" => "ProxyProtocolPolicyType",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec create_load_balancer_policy(
load_balancer_name :: binary,
policy_name :: binary,
policy_type_name :: binary
) :: ExAws.Operation.Query.t()
@spec create_load_balancer_policy(
load_balancer_name :: binary,
policy_name :: binary,
policy_type_name :: binary,
opts :: create_load_balancer_policy_opts
) :: ExAws.Operation.Query.t()
def create_load_balancer_policy(load_balancer_name, policy_name, policy_type_name, opts \\ []) do
[
{:load_balancer_name, load_balancer_name},
{:policy_name, policy_name},
{:policy_type_name, policy_type_name} | opts
]
|> build_request(:create_load_balancer_policy)
end
@doc """
Deletes the specified load balancer
If you are attempting to recreate a load balancer, you must reconfigure all
settings. The DNS name associated with a deleted load balancer are no longer
usable. The name and associated DNS record of the deleted load balancer no longer
exist and traffic sent to any of its IP addresses is no longer delivered to your
instances.
If the load balancer does not exist or has already been deleted, the call to
`delete_load_balancer/1` still succeeds.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
## Examples:
iex> ExAws.ElasticLoadBalancing.delete_load_balancer("mylb")
%ExAws.Operation.Query{
action: :delete_load_balancer,
params: %{
"Action" => "DeleteLoadBalancer",
"LoadBalancerName" => "mylb",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec delete_load_balancer(load_balancer_name :: binary) :: ExAws.Operation.Query.t()
def delete_load_balancer(load_balancer_name) do
[{:load_balancer_name, load_balancer_name}]
|> build_request(:delete_load_balancer)
end
@doc """
Deletes the specified listeners from the specified load balancer
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* ports (`List` of `Integer`) - The client port numbers of the listeners
## Examples:
iex> ExAws.ElasticLoadBalancing.delete_load_balancer_listeners("mylb", [8001, 8002])
%ExAws.Operation.Query{
action: :delete_load_balancer_listeners,
params: %{
"Action" => "DeleteLoadBalancerListeners",
"LoadBalancerName" => "mylb",
"LoadBalancerPorts.member.1" => 8001,
"LoadBalancerPorts.member.2" => 8002,
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec delete_load_balancer_listeners(load_balancer_name :: binary, ports :: [integer, ...]) ::
ExAws.Operation.Query.t()
def delete_load_balancer_listeners(load_balancer_name, ports) do
[{:load_balancer_name, load_balancer_name}, {:load_balancer_ports, ports}]
|> build_request(:delete_load_balancer_listeners)
end
@doc """
Deletes the specified policy from the specified load balancer
This policy must not be enabled for any listeners.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* policy_name (`String`) - The name of the policy
## Examples:
iex> ExAws.ElasticLoadBalancing.delete_load_balancer_policy("mylb", "my-policy")
%ExAws.Operation.Query{
action: :delete_load_balancer_policy,
params: %{
"Action" => "DeleteLoadBalancerPolicy",
"LoadBalancerName" => "mylb",
"PolicyName" => "my-policy",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
def delete_load_balancer_policy(load_balancer_name, policy_name) do
[{:load_balancer_name, load_balancer_name}, {:policy_name, policy_name}]
|> build_request(:delete_load_balancer_policy)
end
@doc """
Deregisters the specified instances from the specified load balancer
After the instance is deregistered, it no longer receives traffic from the load balancer.
You can use `describe_load_balancers/1` to verify that the instance is deregistered
from the load balancer.
For more information, see [Register or De-Register EC2 Instances](https://amzn.to/2urqjVB)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* instances (`List` of `t:instance/0`) - the IDs of the instances
## Examples:
iex> ExAws.ElasticLoadBalancing.deregister_instances_from_load_balancer("mylb", [%{instance_id: "i-12345678"}])
%ExAws.Operation.Query{
action: :deregister_instances_from_load_balancer,
params: %{
"Action" => "DeregisterInstancesFromLoadBalancer",
"LoadBalancerName" => "mylb",
"Instances.member.1.InstanceId" => "i-12345678",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
def deregister_instances_from_load_balancer(load_balancer_name, instances) do
[{:load_balancer_name, load_balancer_name}, {:instances, instances}]
|> build_request(:deregister_instances_from_load_balancer)
end
@doc """
Describes the current Elastic Load Balancing resource limits for your AWS account
## Examples:
iex> ExAws.ElasticLoadBalancing.describe_account_limits()
%ExAws.Operation.Query{
action: :describe_account_limits,
params: %{
"Action" => "DescribeAccountLimits",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec describe_account_limits() :: ExAws.Operation.Query.t()
@spec describe_account_limits(opts :: describe_account_limits_opts) :: ExAws.Operation.Query.t()
def describe_account_limits(opts \\ []) do
opts |> build_request(:describe_account_limits)
end
@doc """
Describes the state of the specified instances with respect to the specified load balancer
If no instances are specified, the call describes the state of all instances that are currently
registered with the load balancer. If instances are specified, their state is returned even if
they are no longer registered with the load balancer. The state of terminated instances is not
returned.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* opts (`t:describe_instance_health_opts/0`) - optionally provide a list of instance ids
## Examples:
iex> ExAws.ElasticLoadBalancing.describe_instance_health("mylb")
%ExAws.Operation.Query{
action: :describe_instance_health,
params: %{
"Action" => "DescribeInstanceHealth",
"Version" => "2012-06-01",
"LoadBalancerName" => "mylb"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
iex> ExAws.ElasticLoadBalancing.describe_instance_health("mylb", [instances: [%{instance_id: "i-12345678"}]])
%ExAws.Operation.Query{
action: :describe_instance_health,
params: %{
"Action" => "DescribeInstanceHealth",
"Version" => "2012-06-01",
"LoadBalancerName" => "mylb",
"Instances.member.1.InstanceId" => "i-12345678",
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec describe_instance_health(load_balancer_name :: binary) :: ExAws.Operation.Query.t()
@spec describe_instance_health(
load_balancer_name :: binary,
opts :: describe_instance_health_opts
) :: ExAws.Operation.Query.t()
def describe_instance_health(load_balancer_name, opts \\ []) do
[
{:load_balancer_name, load_balancer_name} | opts
]
|> build_request(:describe_instance_health)
end
@doc """
Describes the attributes for the specified load balancer
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
## Examples:
iex> ExAws.ElasticLoadBalancing.describe_load_balancer_attributes("mylb")
%ExAws.Operation.Query{
action: :describe_load_balancer_attributes,
params: %{
"Action" => "DescribeLoadBalancerAttributes",
"Version" => "2012-06-01",
"LoadBalancerName" => "mylb"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec describe_load_balancer_attributes(load_balancer_name :: binary) ::
ExAws.Operation.Query.t()
def describe_load_balancer_attributes(load_balancer_name) do
[{:load_balancer_name, load_balancer_name}]
|> build_request(:describe_load_balancer_attributes)
end
@doc """
Describes the specified policies
If you specify a load balancer name, the action returns the descriptions of all
policies created for the load balancer. If you specify a policy name associated
with your load balancer, the action returns the description of that policy. If you
don't specify a load balancer name, the action returns descriptions of the specified
sample policies, or descriptions of all sample policies. The names of the sample
policies have the ELBSample- prefix.
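## Examples
iex> ExAws.ElasticLoadBalancing.describe_load_balancer_policies(load_balancer_name: "mylb", policy_names: ["my-policy"])
%ExAws.Operation.Query{
  action: :describe_load_balancer_policies,
  params: %{
    "Action" => "DescribeLoadBalancerPolicies",
    "LoadBalancerName" => "mylb",
    "PolicyNames.member.1" => "my-policy",
    "Version" => "2012-06-01"
  },
  parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
  path: "/",
  service: :elasticloadbalancing
}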
"""
@spec describe_load_balancer_policies() :: ExAws.Operation.Query.t()
@spec describe_load_balancer_policies(opts :: describe_load_balancer_policies_opts) ::
ExAws.Operation.Query.t()
def describe_load_balancer_policies(opts \\ []) do
opts
|> build_request(:describe_load_balancer_policies)
end
@doc """
Describes the specified load balancer policy types or all load balancer policy types.
The description of each type indicates how it can be used. For example, some policies
can be used only with layer 7 listeners, some policies can be used only with layer 4
listeners, and some policies can be used only with your EC2 instances.
You can use `create_load_balancer_policy/4` to create a policy configuration for any of these
policy types. Then, depending on the policy type, use either
`set_load_balancer_policies_of_listener/3` or
`set_load_balancer_policies_for_backend_server/3` to set the policy.
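## Examples
iex> ExAws.ElasticLoadBalancing.describe_load_balancer_policy_types(policy_type_names: ["ProxyProtocolPolicyType"])
%ExAws.Operation.Query{
  action: :describe_load_balancer_policy_types,
  params: %{
    "Action" => "DescribeLoadBalancerPolicyTypes",
    "PolicyTypeNames.member.1" => "ProxyProtocolPolicyType",
    "Version" => "2012-06-01"
  },
  parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
  path: "/",
  service: :elasticloadbalancing
}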
"""
@spec describe_load_balancer_policy_types() :: ExAws.Operation.Query.t()
@spec describe_load_balancer_policy_types(opts :: describe_load_balancer_policy_types_opts) ::
ExAws.Operation.Query.t()
def describe_load_balancer_policy_types(opts \\ []) do
opts
|> build_request(:describe_load_balancer_policy_types)
end
@doc """
Describes the specified load balancers
If no load balancers are specified, the call describes all of your load balancers.
To describe the attributes for a load balancer, use `describe_load_balancer_attributes/1`.
The options that can be passed into `describe_load_balancers/1` allow load balancer ARNs or names
(ordinarily there would be no reason to specify both). Elastic Load Balancing provides
two versions of ARNS (one for Classic and one for Application Load Balancer). The syntax for
each is below:
Classic Load Balancer ARN Syntax:
arn:aws:elasticloadbalancing:region:account-id:loadbalancer/name
Application Load Balancer ARN Syntax:
arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id
arn:aws:elasticloadbalancing:region:account-id:listener/app/load-balancer-name/load-balancer-id/listener-id
arn:aws:elasticloadbalancing:region:account-id:listener-rule/app/load-balancer-name/load-balancer-id/listener-id/rule-id
arn:aws:elasticloadbalancing:region:account-id:targetgroup/target-group-name/target-group-id
## Examples:
iex> ExAws.ElasticLoadBalancing.describe_load_balancers()
%ExAws.Operation.Query{
action: :describe_load_balancers,
params: %{"Action" => "DescribeLoadBalancers", "Version" => "2012-06-01"},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec describe_load_balancers() :: ExAws.Operation.Query.t()
@spec describe_load_balancers(opts :: describe_load_balancers_opts) :: ExAws.Operation.Query.t()
def describe_load_balancers(opts \\ []) do
opts |> build_request(:describe_load_balancers)
end
@doc """
Describes the tags associated with the specified load balancers
## Parameters:
* load_balancer_names (`List` of `String`) - the names of the load balancers. Minimum number of 1 item.
Maximum number of 20 items
## Examples:
iex> ExAws.ElasticLoadBalancing.describe_tags(["load_balancer_name1", "load_balancer_name2"])
%ExAws.Operation.Query{
action: :describe_tags,
params: %{
"Action" => "DescribeTags",
"LoadBalancerNames.member.1" => "load_balancer_name1",
"LoadBalancerNames.member.2" => "load_balancer_name2",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec describe_tags(load_balancer_names :: [binary, ...]) :: ExAws.Operation.Query.t()
def describe_tags(load_balancer_names) do
[{:load_balancer_names, load_balancer_names}]
|> build_request(:describe_tags)
end
@doc """
Removes the specified subnets from the set of configured subnets for the load balancer.
After a subnet is removed, all EC2 instances registered with the load balancer in the
removed subnet go into the OutOfService state. Then, the load balancer balances the
traffic among the remaining routable subnets.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* subnets (`List` of `String`) - the IDs of the subnets
## Examples:
iex> ExAws.ElasticLoadBalancing.detach_load_balancer_from_subnets("mylb", ["subnet1"])
%ExAws.Operation.Query{
action: :detach_load_balancer_from_subnets,
params: %{
"Action" => "DetachLoadBalancerFromSubnets",
"LoadBalancerName" => "mylb",
"Subnets.member.1" => "subnet1",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec detach_load_balancer_from_subnets(load_balancer_name :: binary, subnets :: [binary, ...]) ::
ExAws.Operation.Query.t()
def detach_load_balancer_from_subnets(load_balancer_name, subnets) do
[{:load_balancer_name, load_balancer_name}, {:subnets, subnets}]
|> build_request(:detach_load_balancer_from_subnets)
end
@doc """
Removes the specified Availability Zones from the set of Availability Zones
for the specified load balancer in EC2-Classic or a default VPC.
For load balancers in a non-default VPC, use `detach_load_balancer_from_subnets/2`.
There must be at least one Availability Zone registered with a load balancer at
all times. After an Availability Zone is removed, all instances registered with
the load balancer that are in the removed Availability Zone go into the OutOfService
state. Then, the load balancer attempts to equally balance the traffic among its
remaining Availability Zones.
For more information, see [Add or Remove Availability Zones](https://amzn.to/2WtKE8q)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* availability_zones (`List` of `String`) - The Availability Zones. These must be in
the same region as the load balancer.
## Examples:
iex> ExAws.ElasticLoadBalancing.disable_availability_zones_for_load_balancer("mylb", ["us-east-1c"])
%ExAws.Operation.Query{
action: :disable_availability_zones_for_load_balancer,
params: %{
"Action" => "DisableAvailabilityZonesForLoadBalancer",
"LoadBalancerName" => "mylb",
"AvailabilityZones.member.1" => "us-east-1c",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec disable_availability_zones_for_load_balancer(
load_balancer_name :: binary,
availability_zones :: [binary, ...]
) :: ExAws.Operation.Query.t()
def disable_availability_zones_for_load_balancer(load_balancer_name, availability_zones) do
[{:load_balancer_name, load_balancer_name}, {:availability_zones, availability_zones}]
|> build_request(:disable_availability_zones_for_load_balancer)
end
@doc """
Adds the specified Availability Zones to the set of Availability Zones for the specified
load balancer in EC2-Classic or a default VPC.
For load balancers in a non-default VPC, use `attach_load_balancer_to_subnets/2`.
The load balancer evenly distributes requests across all its registered Availability Zones
that contain instances. For more information, see [Add or Remove Availability Zones](https://amzn.to/2WtKE8q)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* availability_zones (`List` of `String`) - The Availability Zones. These must be in
the same region as the load balancer.
## Examples:
iex> ExAws.ElasticLoadBalancing.enable_availability_zones_for_load_balancer("mylb", ["us-east-1c"])
%ExAws.Operation.Query{
action: :enable_availability_zones_for_load_balancer,
params: %{
"Action" => "EnableAvailabilityZonesForLoadBalancer",
"LoadBalancerName" => "mylb",
"AvailabilityZones.member.1" => "us-east-1c",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec enable_availability_zones_for_load_balancer(
load_balancer_name :: binary,
availability_zones :: [binary, ...]
) :: ExAws.Operation.Query.t()
def enable_availability_zones_for_load_balancer(load_balancer_name, availability_zones) do
[{:load_balancer_name, load_balancer_name}, {:availability_zones, availability_zones}]
|> build_request(:enable_availability_zones_for_load_balancer)
end
@doc """
Modifies the attributes of the specified load balancer.
You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining,
and CrossZoneLoadBalancing by either enabling or disabling them. Or, you can modify
the load balancer attribute ConnectionSettings by specifying an idle connection
timeout value for your load balancer.
For more information, see the following in the Classic Load Balancers Guide:
* [Cross-Zone Load Balancing](https://amzn.to/2XhXnwe)
* [Connection Draining](https://amzn.to/2JLr9af)
* [Access Logs](https://amzn.to/2RhLiD9)
* [Idle Connection Timeout](https://amzn.to/2U0jGZu)
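## Examples
For example, to enable connection draining with a 300-second timeout:
iex> ExAws.ElasticLoadBalancing.modify_load_balancer_attributes("mylb",
...> connection_draining_enabled: true, connection_draining_timeout: 300)
%ExAws.Operation.Query{
  action: :modify_load_balancer_attributes,
  params: %{
    "Action" => "ModifyLoadBalancerAttributes",
    "LoadBalancerAttributes.ConnectionDraining.Enabled" => true,
    "LoadBalancerAttributes.ConnectionDraining.Timeout" => 300,
    "LoadBalancerName" => "mylb",
    "Version" => "2012-06-01"
  },
  parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
  path: "/",
  service: :elasticloadbalancing
}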
"""
@spec modify_load_balancer_attributes(
load_balancer_name :: binary,
load_balancer_attributes :: [load_balancer_attribute, ...]
) :: ExAws.Operation.Query.t()
def modify_load_balancer_attributes(load_balancer_name, load_balancer_attributes) do
[{:load_balancer_name, load_balancer_name} | load_balancer_attributes]
|> build_request(:modify_load_balancer_attributes)
end
@doc """
Adds the specified instances to the specified load balancer.
The instance must be a running instance in the same network as the load balancer
(EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer
in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that
VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.
Note that `register_instances_with_load_balancer/2` completes when the request has been registered.
Instance registration takes a little time to complete. To check the state of the registered
instances, use `describe_load_balancers/1` or `describe_instance_health/2`.
After the instance is registered, it starts receiving traffic and requests from the load
balancer. Any instance that is not in one of the Availability Zones registered for the load
balancer is moved to the OutOfService state. If an Availability Zone is added to the load
balancer later, any instances registered with the load balancer move to the InService state.
To deregister instances from a load balancer, use `deregister_instances_from_load_balancer/2`.
For more information, see [Register or De-Register EC2 Instances](https://amzn.to/2urqjVB)
in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* instances (`List` of `t:instance/0`) - the IDs of the instances.
## Examples:
iex> ExAws.ElasticLoadBalancing.register_instances_with_load_balancer("mylb", [%{instance_id: "i-12345678"}])
%ExAws.Operation.Query{
action: :register_instances_with_load_balancer,
params: %{
"Action" => "RegisterInstancesWithLoadBalancer",
"LoadBalancerName" => "mylb",
"Instances.member.1.InstanceId" => "i-12345678",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec register_instances_with_load_balancer(
load_balancer_name :: binary,
instances :: [instance, ...]
) :: ExAws.Operation.Query.t()
def register_instances_with_load_balancer(load_balancer_name, instances) do
[{:load_balancer_name, load_balancer_name}, {:instances, instances}]
|> build_request(:register_instances_with_load_balancer)
end
@doc """
Removes one or more tags from the specified load balancer.
## Parameters:
* load_balancer_names (`List` of `String`) - the name of the load balancer. You can
specify a maximum of one load balancer name
* tag_keys (`List` of `t:tag_key_only/0`) - the keys for the tags to remove
## Examples:
iex> ExAws.ElasticLoadBalancing.remove_tags(["mylb"], [%{key: "department"}, %{key: "project"}])
%ExAws.Operation.Query{
action: :remove_tags,
params: %{
"Action" => "RemoveTags",
"LoadBalancerNames.member.1" => "mylb",
"Tags.member.1.Key" => "department",
"Tags.member.2.Key" => "project",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec remove_tags(load_balancer_names :: [binary, ...], tag_keys :: [tag_key_only, ...]) ::
ExAws.Operation.Query.t()
def remove_tags(load_balancer_names, tag_keys) do
[{:load_balancer_names, load_balancer_names}, {:tags, tag_keys}]
|> build_request(:remove_tags)
end
@doc """
Sets the certificate that terminates the specified listener's SSL connections
The specified certificate replaces any prior certificate that was used on the
same load balancer and port.
For more information about updating your SSL certificate, see [Replace the SSL
Certificate for Your Load Balancer](https://amzn.to/2HHSs3p) in the Classic
Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* ssl_certificate_id (`String`) - the Amazon Resource Name (ARN) of the SSL certificate
* load_balancer_port (`Integer`) - the port that uses the specified SSL certificate
## Examples:
iex> ExAws.ElasticLoadBalancing.set_load_balancer_listener_ssl_certificate("mylb", "arn:aws:iam::123456789012", 443)
%ExAws.Operation.Query{
action: :set_load_balancer_listener_ssl_certificate,
params: %{
"Action" => "SetLoadBalancerListenerSSLCertificate",
"LoadBalancerName" => "mylb",
"LoadBalancerPort" => 443,
"SSLCertificateId" => "arn:aws:iam::123456789012",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec set_load_balancer_listener_ssl_certificate(
load_balancer_name :: binary,
ssl_certificate_id :: binary,
load_balancer_port :: integer
) :: ExAws.Operation.Query.t()
def set_load_balancer_listener_ssl_certificate(
load_balancer_name,
ssl_certificate_id,
load_balancer_port
) do
[
{:load_balancer_name, load_balancer_name},
{:ssl_certificate_id, ssl_certificate_id},
{:load_balancer_port, load_balancer_port}
]
|> build_request(:set_load_balancer_listener_ssl_certificate)
end
@doc """
Replaces the set of policies associated with the specified port on which the
EC2 instance is listening with a new set of policies
At this time, only the back-end server authentication policy type can be applied
to the instance ports; this policy type is composed of multiple public key policies.
Each time you use `set_load_balancer_policies_for_backend_server/3` to enable the policies, use
the PolicyNames parameter to list the policies that you want to enable.
You can use `describe_load_balancers/1` or `describe_load_balancer_policies/1` to verify that the
policy is associated with the EC2 instance.
For more information about enabling back-end instance authentication, see
[Configure Back-end Instance Authentication](https://amzn.to/2TDGppd) in the Classic
Load Balancers Guide. For more information about Proxy Protocol, see [Configure Proxy
Protocol Support](https://amzn.to/2HHelQd) in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* policy_names (`List` of `String`) - the names of the policies. If the list is empty,
then all current policies are removed from the EC2 instance
* instance_port (`Integer`) - the port number associated with the EC2 instance
## Examples:
iex> ExAws.ElasticLoadBalancing.set_load_balancer_policies_for_backend_server("mylb", ["EnableProxyProtocol", "my-policy2"], 80)
%ExAws.Operation.Query{
action: :set_load_balancer_policies_for_backend_server,
params: %{
"Action" => "SetLoadBalancerPoliciesForBackendServer",
"InstancePort" => 80,
"LoadBalancerName" => "mylb",
"PolicyNames.member.1" => "EnableProxyProtocol",
"PolicyNames.member.2" => "my-policy2",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec set_load_balancer_policies_for_backend_server(
load_balancer_name :: binary,
policy_names :: [binary, ...],
instance_port :: integer
) :: ExAws.Operation.Query.t()
def set_load_balancer_policies_for_backend_server(
load_balancer_name,
policy_names,
instance_port
) do
[
{:load_balancer_name, load_balancer_name},
{:policy_names, policy_names},
{:instance_port, instance_port}
]
|> build_request(:set_load_balancer_policies_for_backend_server)
end
@doc """
Replaces the current set of policies for the specified load balancer port with the specified set of policies.
To enable back-end server authentication, use `set_load_balancer_policies_for_backend_server/3`.
For more information about setting policies, see [Update the SSL Negotiation Configuration](https://amzn.to/2WuPGlj),
[Duration-Based Session Stickiness](https://amzn.to/2CzNbX9), and [Application-Controlled Session
Stickiness](https://amzn.to/2HFsyNz) in the Classic Load Balancers Guide.
## Parameters:
* load_balancer_name (`String`) - the name of the load balancer
* policy_names (`List` of `String`) - the names of the policies. If the list is empty,
then all current policies are removed from the listener
* load_balancer_port (`Integer`) - the external port of the load balancer
## Examples:
iex> ExAws.ElasticLoadBalancing.set_load_balancer_policies_of_listener("mylb", ["my-SSLNegotiation-policy"], 443)
%ExAws.Operation.Query{
action: :set_load_balancer_policies_of_listener,
params: %{
"Action" => "SetLoadBalancerPoliciesOfListener",
"LoadBalancerPort" => 443,
"LoadBalancerName" => "mylb",
"PolicyNames.member.1" => "my-SSLNegotiation-policy",
"Version" => "2012-06-01"
},
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2,
path: "/",
service: :elasticloadbalancing
}
"""
@spec set_load_balancer_policies_of_listener(
load_balancer_name :: binary,
policy_names :: [binary, ...],
load_balancer_port :: integer
) :: ExAws.Operation.Query.t()
def set_load_balancer_policies_of_listener(load_balancer_name, policy_names, load_balancer_port) do
[
{:load_balancer_name, load_balancer_name},
{:policy_names, policy_names},
{:load_balancer_port, load_balancer_port}
]
|> build_request(:set_load_balancer_policies_of_listener)
end
defp build_request(opts, action) do
opts
|> Enum.flat_map(&format_param/1)
|> request(action)
end
defp request(params, action) do
action_string = action |> camelize_action()
%ExAws.Operation.Query{
path: "/",
params:
params
|> filter_nil_params
|> Map.put("Action", action_string)
|> Map.put("Version", @version),
service: :elasticloadbalancing,
action: action,
parser: &ExAws.ElasticLoadBalancing.Parsers.parse/2
}
end
defp camelize_action(:create_lb_cookie_stickiness_policy) do
"CreateLBCookieStickinessPolicy"
end
defp camelize_action(:set_load_balancer_listener_ssl_certificate) do
"SetLoadBalancerListenerSSLCertificate"
end
defp camelize_action(action) do
action |> Atom.to_string() |> Macro.camelize()
end
defp format_param({:tags, tags}) do
tags |> format(prefix: "Tags.member")
end
defp format_param({:ssl_certificate_id, ssl_certificate_id}) do
ssl_certificate_id |> format(prefix: "SSLCertificateId")
end
defp format_param({:security_groups, security_groups}) do
security_groups |> format(prefix: "SecurityGroups.member")
end
defp format_param({:subnets, subnets}) do
subnets |> format(prefix: "Subnets.member")
end
defp format_param({:listeners, listeners}) do
listeners |> format(prefix: "Listeners.member")
end
defp format_param({:availability_zones, availability_zones}) do
availability_zones |> format(prefix: "AvailabilityZones.member")
end
defp format_param({:load_balancer_ports, ports}) do
ports |> format(prefix: "LoadBalancerPorts.member")
end
defp format_param({:instances, instances}) do
instances |> format(prefix: "Instances.member")
end
defp format_param({:load_balancer_names, load_balancer_names}) do
load_balancer_names |> format(prefix: "LoadBalancerNames.member")
end
defp format_param({:policy_type_names, policy_type_names}) do
policy_type_names |> format(prefix: "PolicyTypeNames.member")
end
defp format_param({:policy_names, policy_names}) do
policy_names |> format(prefix: "PolicyNames.member")
end
defp format_param({:policy_attributes, policy_attributes}) do
policy_attributes |> format(prefix: "PolicyAttributes.member")
end
defp format_param({:max_items, max_items}) do
max_items |> format(prefix: "MaxItems")
end
defp format_param({:page_size, page_size}) do
page_size |> format(prefix: "PageSize")
end
defp format_param({:starting_token, starting_token}) do
starting_token |> format(prefix: "StartingToken")
end
defp format_param({:connection_settings_idle_timeout, timeout}) do
timeout |> format(prefix: "LoadBalancerAttributes.ConnectionSettings.IdleTimeout")
end
defp format_param({:cross_zone_load_balancing_enabled, enabled}) do
enabled |> format(prefix: "LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled")
end
defp format_param({:connection_draining_enabled, connection_draining_enabled}) do
connection_draining_enabled
|> format(prefix: "LoadBalancerAttributes.ConnectionDraining.Enabled")
end
defp format_param({:connection_draining_timeout, connection_draining_timeout}) do
connection_draining_timeout
|> format(prefix: "LoadBalancerAttributes.ConnectionDraining.Timeout")
end
defp format_param({:access_log_emit_interval, access_log_emit_interval}) do
access_log_emit_interval |> format(prefix: "LoadBalancerAttributes.AccessLog.EmitInterval")
end
defp format_param({:access_log_enabled, access_log_enabled}) do
access_log_enabled |> format(prefix: "LoadBalancerAttributes.AccessLog.Enabled")
end
defp format_param({:access_log_s3_bucket_prefix, access_log_s3_bucket_prefix}) do
access_log_s3_bucket_prefix
|> format(prefix: "LoadBalancerAttributes.AccessLog.S3BucketPrefix")
end
defp format_param({:access_log_s3_bucket_name, access_log_s3_bucket_name}) do
access_log_s3_bucket_name |> format(prefix: "LoadBalancerAttributes.AccessLog.S3BucketName")
end
defp format_param({:health_check, health_check}) do
health_check |> format(prefix: "HealthCheck")
end
defp format_param({key, parameters}) do
format([{key, parameters}])
end
end

# Source: lib/ex_aws/elastic_load_balancing.ex
defmodule Protobuf.JSON.Decode do
@moduledoc false
import Bitwise, only: [bsl: 2]
alias Protobuf.JSON.Utils
@compile {:inline,
field_value: 2,
decode_map: 2,
decode_repeated: 2,
decode_integer: 1,
decode_float: 1,
parse_float: 1,
decode_bytes: 1,
decode_key: 3,
parse_key: 2}
@int32_range -bsl(1, 31)..(bsl(1, 31) - 1)
@int64_range -bsl(1, 63)..(bsl(1, 63) - 1)
@uint32_range 0..(bsl(1, 32) - 1)
@uint64_range 0..(bsl(1, 64) - 1)
@int_ranges %{
int32: @int32_range,
int64: @int64_range,
sint32: @int32_range,
sint64: @int64_range,
sfixed32: @int32_range,
sfixed64: @int64_range,
fixed32: @int32_range,
fixed64: @int64_range,
uint32: @uint32_range,
uint64: @uint64_range
}
@int_types Map.keys(@int_ranges)
@float_types [:float, :double]
def from_json_data(data, module) when is_map(data) and is_atom(module) do
message_props = Utils.message_props(module)
regular = decode_regular_fields(data, message_props)
oneofs = decode_oneof_fields(data, message_props)
module.__default_struct__()
|> struct(regular)
|> struct(oneofs)
end
def from_json_data(data, module) when is_atom(module), do: throw({:bad_message, data})
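# Hedged usage sketch: `MyProto.User` below stands for a hypothetical module
# generated by protoc-gen-elixir; any generated message module would work.
#
#     Protobuf.JSON.Decode.from_json_data(%{"name" => "alice"}, MyProto.User)
#     #=> %MyProto.User{name: "alice"}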
defp decode_regular_fields(data, %{field_props: field_props}) do
for {_field_num, %{oneof: nil} = prop} <- field_props,
value = field_value(prop, data) do
{prop.name_atom, decode_value(prop, value)}
end
end
defp decode_oneof_fields(data, %{field_props: field_props, oneof: oneofs}) do
for {oneof, index} <- oneofs,
{_field_num, %{oneof: ^index} = prop} <- field_props,
not is_nil(value = field_value(prop, data)) do
{oneof, prop, value}
end
|> Enum.reduce(%{}, fn {oneof, prop, value}, acc ->
Map.update(acc, oneof, {prop.name_atom, decode_value(prop, value)}, fn _ ->
throw({:duplicated_oneof, oneof})
end)
end)
end
defp field_value(%{json_name: json_key, name: name_key}, data) do
case data do
%{^json_key => value} -> value
%{^name_key => value} -> value
_ -> nil
end
end
defp decode_value(%{map?: true} = prop, map), do: decode_map(prop, map)
defp decode_value(%{repeated?: true} = prop, list), do: decode_repeated(prop, list)
defp decode_value(%{repeated?: false} = prop, value), do: decode_singular(prop, value)
defp decode_map(%{type: module, name_atom: field}, map) when is_map(map) do
%{field_props: field_props, field_tags: field_tags} = Utils.message_props(module)
key_type = field_props[field_tags[:key]].type
val_prop = field_props[field_tags[:value]]
for {key, val} <- map, into: %{} do
{decode_key(key_type, key, field), decode_singular(val_prop, val)}
end
end
defp decode_map(prop, bad_map), do: throw({:bad_map, prop.name_atom, bad_map})
defp decode_key(type, key, field) when is_binary(key) do
case parse_key(type, key) do
{:ok, decoded} -> decoded
:error -> throw({:bad_map_key, field, type, key})
end
end
defp decode_key(type, key, field), do: throw({:bad_map_key, field, type, key})
# Map keys can be of any scalar type except float, double, and bytes. They
# must always be wrapped in strings. Other types should not compile.
defp parse_key(:string, key), do: {:ok, key}
defp parse_key(:bool, "true"), do: {:ok, true}
defp parse_key(:bool, "false"), do: {:ok, false}
defp parse_key(type, key) when type in @int_types, do: parse_int(key)
defp parse_key(_type, _key), do: :error
defp decode_repeated(prop, value) when is_list(value) do
for val <- value, do: decode_singular(prop, val)
end
defp decode_repeated(prop, value) do
throw({:bad_repeated, prop.name_atom, value})
end
defp decode_singular(%{type: :string} = prop, value) do
if is_binary(value),
do: value,
else: throw({:bad_string, prop.name_atom, value})
end
defp decode_singular(%{type: :bool} = prop, value) do
if is_boolean(value),
do: value,
else: throw({:bad_bool, prop.name_atom, value})
end
defp decode_singular(%{type: type} = prop, value) when type in @int_types do
with {:ok, integer} <- decode_integer(value),
true <- integer in @int_ranges[type] do
integer
else
_ -> throw({:bad_int, prop.name_atom, value})
end
end
defp decode_singular(%{type: type} = prop, value) when type in @float_types do
case decode_float(value) do
{:ok, float} -> float
_ -> throw({:bad_float, prop.name_atom, value})
end
end
defp decode_singular(%{type: :bytes} = prop, value) do
with true <- is_binary(value),
{:ok, bytes} <- decode_bytes(value) do
bytes
else
_ -> throw({:bad_bytes, prop.name_atom})
end
end
defp decode_singular(%{type: {:enum, enum}} = prop, value) do
Map.get_lazy(enum.__reverse_mapping__(), value, fn ->
if is_integer(value) && value in @int32_range,
do: value,
else: throw({:bad_enum, prop.name_atom, value})
end)
end
defp decode_singular(%{type: module, embedded?: true}, value) do
from_json_data(value, module)
end
defp decode_integer(integer) when is_integer(integer), do: {:ok, integer}
defp decode_integer(string) when is_binary(string), do: parse_int(string)
defp decode_integer(_bad), do: :error
defp parse_int(string) do
case Integer.parse(string) do
{int, ""} -> {:ok, int}
_ -> :error
end
end
defp decode_float(float) when is_float(float), do: {:ok, float}
defp decode_float(string) when is_binary(string), do: parse_float(string)
defp decode_float(_bad), do: :error
defp parse_float("-Infinity"), do: {:ok, :negative_infinity}
defp parse_float("Infinity"), do: {:ok, :infinity}
defp parse_float("NaN"), do: {:ok, :nan}
defp parse_float(string) do
case Float.parse(string) do
{float, ""} -> {:ok, float}
_ -> :error
end
end
# Both url-encoded and regular base64 are accepted, with and without padding.
defp decode_bytes(bytes) do
pattern = :binary.compile_pattern(["-", "_"])
if String.contains?(bytes, pattern) do
Base.url_decode64(bytes, padding: false)
else
Base.decode64(bytes, padding: false)
end
end
end
defmodule SMSFactor.Contacts do
@moduledoc """
Wrappers around **Contacts** section of SMSFactor API.
"""
@typedoc """
Params for defining a contact.
## Example
```elixir
{
"value": "33612345676",
"info1": "Hedy",
"info2": "Lamarr",
"info3": "Extase",
"info4": "1933"
}
```
"""
@type contact_params() :: %{atom() => String.t()}
@typedoc """
Params for defining contacts list.
## Example
```elixir
{
"list": {
"listId": 50433,
"contacts": {
"gsm": [
{
"value": "33612345678",
"info1": "Hiroo",
"info2": "Onoda"
},
{
"value": "33612345677",
"info1": "Grace",
"info2": "Hopper"
},
{
"value": "33612345676",
"info1": "Hedy",
"info2": "Lamarr",
"info3": "Extase",
"info4": "1933"
}
]
}
}
}
```
"""
@type contact_list_params() :: %{list: %{atom() => String.t()}}
@spec add_contact(Tesla.Client.t(), contact_list_params()) :: Tesla.Env.result()
def add_contact(client, params), do: Tesla.post(client, "/list", params)
@spec deduplicate_list(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def deduplicate_list(client, list_id), do: Tesla.put(client, "/list/deduplicate/#{list_id}")
@spec remove_contact(Tesla.Client.t(), integer()) :: Tesla.Env.result()
def remove_contact(client, contact_id), do: Tesla.delete(client, "/list/contact/#{contact_id}")
@spec update_contact(Tesla.Client.t(), integer(), contact_params()) :: Tesla.Env.result()
def update_contact(client, contact_id, params) do
Tesla.put(client, "/list/contact/#{contact_id}", params)
end
@spec insert_to_blacklist(Tesla.Client.t(), contact_list_params()) :: Tesla.Env.result()
def insert_to_blacklist(client, params), do: Tesla.post(client, "/blacklist", params)
@spec insert_to_npai_list(Tesla.Client.t(), contact_list_params()) :: Tesla.Env.result()
def insert_to_npai_list(client, params), do: Tesla.post(client, "/npai", params)
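# Hedged usage sketch: the Tesla client below is an assumption, not part of
# this module; any middleware stack providing the SMSFactor base URL and
# authentication headers would do.
#
#     client = Tesla.client([{Tesla.Middleware.BaseUrl, "https://api.smsfactor.com"}])
#     SMSFactor.Contacts.remove_contact(client, 123)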
end
defmodule Cizen.Automaton do
@moduledoc """
A saga framework to create an automaton.
"""
alias Cizen.Dispatcher
alias Cizen.EffectHandler
alias Cizen.Event
alias Cizen.Saga
alias Cizen.SagaID
alias Cizen.Automaton.{PerformEffect, Yield}
@finish {__MODULE__, :finish}
def finish, do: @finish
@type finish :: {__MODULE__, :finish}
@type state :: term
@doc """
Invoked when the automaton is spawned.
Saga.Started event will be dispatched after this callback.
Returned value will be used as the next state to pass `c:yield/2` callback.
Returning `Automaton.finish()` will cause the automaton to finish.
If not defined, default implementation is used,
and it passes the given saga struct to `c:yield/2` callback.
"""
@callback spawn(SagaID.t(), Saga.t()) :: finish | state
@doc """
Invoked when last `c:spawn/2` or `c:yield/2` callback returns a next state.
Returned value will be used as the next state to pass `c:yield/2` callback.
Returning `Automaton.finish()` will cause the automaton to finish.
If not defined, default implementation is used,
and it returns `Automaton.finish()`.
"""
@callback yield(SagaID.t(), state) :: finish | state
@doc """
Invoked when the automaton is resumed.
Returned value will be used as the next state to pass `c:yield/2` callback.
Returning `Automaton.finish()` will cause the automaton to finish.
This callback is predefined. The default implementation is here:
```
def respawn(id, saga, state) do
spawn(id, saga)
state
end
```
"""
@callback respawn(SagaID.t(), Saga.t(), state) :: finish | state
defmacro __using__(_opts) do
quote do
alias Cizen.Automaton
import Cizen.Automaton, only: [perform: 2, finish: 0]
require Cizen.Filter
use Saga
@behaviour Automaton
@impl Automaton
def spawn(_id, struct) do
struct
end
@impl Automaton
def respawn(id, saga, state) do
spawn(id, saga)
state
end
@impl Automaton
def yield(_id, _state) do
finish()
end
defoverridable spawn: 2, respawn: 3, yield: 2
@impl Saga
def init(id, struct) do
Automaton.init(id, struct)
end
@impl Saga
def resume(id, struct, state) do
Automaton.resume(id, struct, state)
end
@impl Saga
def handle_event(id, event, state) do
Automaton.handle_event(id, event, state)
end
end
end
@doc """
Performs an effect.
`perform/2` blocks the current process until the effect is resolved,
and returns the result of the effect.
Note that `perform/2` works only inside the automaton's own process, since
the effect result is delivered back to that process.
"""
def perform(id, effect) do
event = Event.new(id, %PerformEffect{handler: id, effect: effect})
Dispatcher.dispatch(event)
Saga.send_to(id, event)
receive do
response -> response
end
end
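# Hedged sketch of perform/2 inside a yield/2 callback; the effect struct
# (Cizen.Effects.Receive) and the handle/2 helper are assumptions:
#
#     def yield(id, state) do
#       event = perform(id, %Cizen.Effects.Receive{})
#       handle(event, state)
#     end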
defp do_yield(module, id, state) do
Dispatcher.dispatch(Event.new(id, %Yield{state: state}))
case state do
@finish ->
Dispatcher.dispatch(Event.new(id, %Saga.Finish{id: id}))
state ->
state = module.yield(id, state)
do_yield(module, id, state)
end
end
def init(id, saga) do
init_with(id, saga, %Saga.Started{id: id}, :spawn, [id, saga])
end
def resume(id, saga, state) do
init_with(id, saga, %Saga.Resumed{id: id}, :respawn, [id, saga, state])
end
defp init_with(id, saga, event, function, arguments) do
module = Saga.module(saga)
pid =
spawn_link(fn ->
try do
state = apply(module, function, arguments)
Dispatcher.dispatch(Event.new(id, event))
do_yield(module, id, state)
rescue
reason -> Saga.exit(id, reason, __STACKTRACE__)
end
end)
handler_state = EffectHandler.init(id)
{Saga.lazy_init(), {pid, handler_state}}
end
def handle_event(_id, %Event{body: %PerformEffect{effect: effect}}, {pid, handler}) do
handle_result(pid, EffectHandler.perform_effect(handler, effect))
end
def handle_event(_id, event, state) do
feed_event(state, event)
end
defp feed_event({pid, handler}, event) do
handle_result(pid, EffectHandler.feed_event(handler, event))
end
defp handle_result(pid, {:resolve, value, state}) do
send(pid, value)
{pid, state}
end
defp handle_result(pid, state) do
{pid, state}
end
end
defmodule Blogit.Component do
@moduledoc """
Contains common logic for creating and naming `Blogit` component processes.
A component is a `GenServer`, but instead declaring:
```
use GenServer
```
it should declare:
```
use Blogit.Component
```
This will make it a `GenServer` and will create the `start_link/1` function
for creating the component process for the module.
This will also add the `name/1` function, which creates uniq name/id for the
component based on the given `language`. This name/id is used by the
`Blogit.Components.Supervisor` process.
"""
@doc false
defmacro __using__(options \\ []) do
quote do
default_base_name =
__MODULE__
|> to_string()
|> String.split(".")
|> List.last()
|> Macro.underscore()
base_name_string = Keyword.get(unquote(options), :base_name, default_base_name)
# Code used by every component process module:
use GenServer
alias Blogit.Settings
@base_name base_name_string
@doc """
Returns the base name, which identifies the process. For example it
could be `posts`.
"""
@spec base_name() :: String.t()
def base_name, do: @base_name
@doc """
Returns the name, which identifies the process. It is composed using the
`base_name/0` and the given `language`. For example if the `base_name/0`
returns `posts` and the given language is `en`, the name will be
`posts_en`.
The worker id registered under the `Blogit.Components.Supervisor` will
be the name returned by this function, when `start_link/1` is called
to create the process. The language passed to it
(or the one returned from `Blogit.Settings.default_language/0`) will be
passed to `name/1` to create the name.
"""
@spec name(String.t()) :: atom
def name(language), do: :"#{base_name()}_#{language}"
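# e.g. for a base_name of "posts", name("en") returns :posts_en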
@doc """
Starts the `GenServer` process.
The process is started and supervised by `Blogit.Components.Supervisor`
and the specification of it is added by `Blogit.Server`.
The state of the process in the beginning is nil.
The process should keep the given `language` passed to `init/1`
as the first of a tuple as part of its state. This process should serve
requests related to that `language`.
The given `state_provider` is the second element of the tuple passed to
`GenServer.init/1`. It could be used to retrieve the state of
the process.
By default the `language` is the one returned by
`Blogit.Settings.default_language/0` and the `state_provider` is
`Blogit.Server`.
"""
@spec start_link(String.t(), module()) :: GenServer.on_start()
def start_link(
language \\ Settings.default_language(),
state_provider \\ Blogit.Server
) do
args = {language, state_provider}
GenServer.start_link(__MODULE__, args, name: name(language))
end
end
end
end
defmodule Algae.Writer do
@moduledoc ~S"""
`Algae.Writer` helps capture the pattern of writing to a pure log or accumulated
value, handling the bookkeeping for you.
If `Algae.Reader` is quasi-read-only, `Algae.Writer` is quasi-write-only.
This is often used for loggers, but could be anything as long as the hidden value
is a `Witchcraft.Monoid`.
There are many applications of `Writer`s, but as an illustrative point,
one could use it for logging across processes and time, since the log
is carried around with the result in a pure fashion. The monadic DSL
helps make using these feel more natural.
For an illustrated guide to `Writer`s,
see [Three Useful Monads](http://adit.io/posts/2013-06-10-three-useful-monads.html#the-state-monad).
## Anatomy
%Algae.Writer{writer: {value, log}}
↑ ↑
# "explicit" value position "hidden" position,
# commonly used as a log
## Examples
iex> use Witchcraft
...>
...> excite =
...> fn string ->
...> monad writer({0.0, "log"}) do
...> tell string
...>
...> excited <- return "#{string}!"
...> tell " => #{excited} ... "
...>
...> return excited
...> end
...> end
...>
...> {_, logs} =
...> "Hi"
...> |> excite.()
...> >>> excite
...> >>> excite
...> |> censor(&String.trim_trailing(&1, " ... "))
...> |> run()
...>
...> logs
"Hi => Hi! ... Hi! => Hi!! ... Hi!! => Hi!!!"
iex> use Witchcraft
...>
...> exponent =
...> fn num ->
...> monad writer({0, 0}) do
...> tell 1
...> return num * num
...> end
...> end
...>
...> initial = 42
...> {result, times} = run(exponent.(initial) >>> exponent >>> exponent)
...>
...> "#{initial}^#{round(:math.pow(2, times))} = #{result}"
"42^8 = 9682651996416"
"""
alias __MODULE__
alias Witchcraft.{Monoid, Unit}
use Witchcraft
@type log :: Monoid.t()
@type value :: any()
@type writer :: {Writer.value(), Writer.log()}
@type t :: %Writer{writer: writer()}
defstruct writer: {0, []}
@doc """
Construct a `Algae.Writer` struct from a starting value and log.
## Examples
iex> new()
%Algae.Writer{writer: {0, []}}
iex> new("hi")
%Algae.Writer{writer: {"hi", []}}
iex> new("ohai", 42)
%Algae.Writer{writer: {"ohai", 42}}
"""
@spec new(any(), Monoid.t()) :: Writer.t()
def new(value \\ 0, log \\ []), do: %Writer{writer: {value, log}}
@doc """
Similar to `new/2`, but taking a tuple rather than separate fields.
## Examples
iex> writer({"ohai", 42})
%Algae.Writer{writer: {"ohai", 42}}
"""
@spec writer(Writer.writer()) :: Writer.t()
def writer({value, log}), do: new(value, log)
@doc ~S"""
Extract the enclosed value and log from an `Algae.Writer`.
## Examples
iex> run(%Algae.Writer{writer: {"hi", "there"}})
{"hi", "there"}
iex> use Witchcraft
...>
...> half =
...> fn num ->
...> monad writer({0.0, ["log"]}) do
...> let half = num / 2
...> tell ["#{num} / 2 = #{half}"]
...> return half
...> end
...> end
...>
...> run(half.(42) >>> half >>> half)
{
5.25,
[
"42 / 2 = 21.0",
"21.0 / 2 = 10.5",
"10.5 / 2 = 5.25"
]
}
"""
@spec run(Writer.t()) :: Writer.writer()
def run(%Writer{writer: writer}), do: writer
@doc ~S"""
Set the "log" portion of an `Algae.Writer` step
## Examples
iex> tell("secrets")
%Algae.Writer{writer: {%Witchcraft.Unit{}, "secrets"}}
iex> use Witchcraft
...>
...> monad %Algae.Writer{writer: {"string", 1}} do
...> tell 42
...> tell 43
...> return "hey"
...> end
%Algae.Writer{writer: {"hey", 85}}
iex> use Witchcraft
...>
...> half =
...> fn num ->
...> monad writer({0.0, ["log"]}) do
...> let half = num / 2
...> tell ["#{num} / 2 = #{half}"]
...> return half
...> end
...> end
...>
...> run(half.(42.0) >>> half >>> half)
{
5.25,
[
"42.0 / 2 = 21.0",
"21.0 / 2 = 10.5",
"10.5 / 2 = 5.25"
]
}
"""
@spec tell(Writer.log()) :: Writer.t()
def tell(log), do: new(%Unit{}, log)
@doc """
Copy the log into the value position. This makes it accessible in do-notation.
## Examples
iex> listen(%Algae.Writer{writer: {42, "hi"}})
%Algae.Writer{writer: {{42, "hi"}, "hi"}}
iex> use Witchcraft
...>
...> monad new(1, 1) do
...> wr <- listen tell(42)
...> tell 43
...> return wr
...> end
%Algae.Writer{
writer: {{%Witchcraft.Unit{}, 42}, 85}
}
"""
@spec listen(Writer.t()) :: Writer.t()
def listen(%Writer{writer: {value, log}}), do: %Writer{writer: {{value, log}, log}}
@doc """
Similar to `listen/1`, but with the ability to adjust the copied log.
## Examples
iex> listen(%Algae.Writer{writer: {1, "hi"}}, &String.upcase/1)
%Algae.Writer{
writer: {{1, "HI"}, "hi"}
}
"""
@spec listen(Writer.t(), (log() -> log())) :: Writer.t()
def listen(writer, fun) do
monad writer do
{value, log} <- listen writer
return {value, fun.(log)}
end
end
@doc ~S"""
Run a function in the value portion of an `Algae.Writer` on the log.
Notice that the structure is similar to what somes out of `listen/{1,2}`
Algae.Writer{writer: {{_, function}, log}}
## Examples
iex> pass(%Algae.Writer{writer: {{1, fn x -> x * 10 end}, 42}})
%Algae.Writer{writer: {1, 420}}
iex> use Witchcraft
...>
...> monad new("string", ["logs"]) do
...> a <- ["start"] |> tell() |> listen()
...> tell ["middle"]
...>
...> {value, logs} <- return a
...> pass writer({{value, fn [log | _] -> [log | [log | logs]] end}, logs})
...>
...> tell ["next is 42"]
...> return 42
...> end
%Algae.Writer{
writer: {42, ["start", "middle", "start", "start", "start", "next is 42"]}
}
"""
@spec pass(Writer.t()) :: Writer.t()
def pass(%Writer{writer: {{value, fun}, log}}), do: %Writer{writer: {value, fun.(log)}}
@doc ~S"""
Run a writer, and run a function over the resulting log.
## Examples
iex> 42
...> |> new(["hi", "THERE", "friend"])
...> |> censor(&Enum.reject(&1, fn log -> String.upcase(log) == log end))
...> |> run()
{42, ["hi", "friend"]}
iex> use Witchcraft
...>
...> 0
...> |> new(["logs"])
...> |> monad do
...> tell ["Start"]
...> tell ["BANG!"]
...> tell ["shhhhhhh..."]
...> tell ["LOUD NOISES!!!"]
...> tell ["End"]
...>
...> return 42
...> end
...> |> censor(&Enum.reject(&1, fn log -> String.upcase(log) == log end))
...> |> run()
{42, ["Start", "shhhhhhh...", "End"]}
"""
@spec censor(Writer.t(), (any() -> any())) :: Writer.t()
def censor(writer, fun) do
pass(monad writer do
value <- writer
return {value, fun}
end)
end
end
defimpl Transmog.Parser, for: BitString do
@moduledoc """
Implementation of `Transmog.Parser` for strings. Parses strings which are
represented as dot notation and maps them to values in a nested map, struct or
list.
## Examples
"a.:b.c" #=> References a map or list with key path ["a", :b, "c"]
":a.1" #=> References a map or list with key path [:a, "1"]
As you can see there are some caveats to this dot notation:
* You are not able to represent paths that contain anything other than atoms
or strings.
## Examples
iex> string = "credentials.:first_name"
iex> {:ok, key_path} = Transmog.Parser.parse(string)
iex> key_path
["credentials", :first_name]
iex> string = "credentials.:first_name"
iex> Transmog.Parser.parse!(string)
["credentials", :first_name]
#=> Notice: we can escape a period in order for it to be preserved
iex> string = "credentials\\\\.first_name"
iex> Transmog.Parser.parse!(string)
["credentials.first_name"]
#=> Notice: an empty string, like the empty list, is considered invalid
iex> string = ""
iex> Transmog.Parser.parse(string)
{:error, :invalid_key_path}
iex> string = ""
iex> Transmog.Parser.parse!(string)
** (Transmog.InvalidKeyPathError) key path is not valid (\"\")
"""
alias Transmog.InvalidKeyPathError
alias Transmog.Parser
# The token that each part of the path is split on
@token "."
@doc """
`parse/1` parses a string into a key path. If the string is empty then it is
considered invalid and an error is returned immediately. Non-empty strings will be parsed
by splitting on the dot character to generate the path.
Atoms that are found during the parse, ie. strings that are prefixed with a
colon, will be safely converted to an atom.
## Examples
iex> string = "a.:b.c"
iex> {:ok, key_path} = Transmog.Parser.parse(string)
iex> key_path
["a", :b, "c"]
iex> string = "a\\\\.b.c"
iex> {:ok, key_path} = Transmog.Parser.parse(string)
iex> key_path
["a.b", "c"]
iex> string = "a\\\\.:b.c"
iex> {:ok, key_path} = Transmog.Parser.parse(string)
iex> key_path
["a.:b", "c"]
"""
@spec parse(string :: binary) :: {:ok, list(term)} | Parser.error()
def parse(""), do: {:error, :invalid_key_path}
def parse(string) when is_binary(string) do
parts =
string
|> split_on_token()
|> Enum.map(&parse_field/1)
{:ok, parts}
end
@doc """
`parse!/1` parses a string into a key path. If the string is empty then it is
considered invalid and an error is raised. Otherwise the parse will delegate
to `parse/1`.
## Examples
iex> string = "a.:b.c"
iex> Transmog.Parser.parse!(string)
["a", :b, "c"]
iex> string = "a\\\\.b.c"
iex> Transmog.Parser.parse!(string)
["a.b", "c"]
iex> string = "a\\\\.:b.c"
iex> Transmog.Parser.parse!(string)
["a.:b", "c"]
iex> string = ""
iex> Transmog.Parser.parse!(string)
** (Transmog.InvalidKeyPathError) key path is not valid (\"\")
"""
@spec parse!(string :: binary) :: list(term)
def parse!(""), do: InvalidKeyPathError.new("")
def parse!(string) when is_binary(string), do: elem(parse(string), 1)
# Parses a single field of the dot notation string. If the field begins with
# a colon, then it is parsed as an atom. Only existing atoms will be used to
# be safe.
@spec parse_field(field :: binary) :: atom | binary
defp parse_field(":" <> field) when is_binary(field), do: String.to_existing_atom(field)
defp parse_field(field) when is_binary(field), do: field
# Helper function which stores the logic for splitting a string on the token
# character. At this time the token character is a period.
@spec split_on_token(string :: binary) :: list(binary)
defp split_on_token(string) when is_binary(string) do
string
|> String.split(~r[(?<!\\)#{Regex.escape(@token)}])
|> Enum.map(&String.replace(&1, "\\", ""))
end
end
defmodule Engine.DB.Transaction do
@moduledoc """
The Transaction record. This is one of the main entry points for the system, specifically accepting
transactions into the Childchain as `tx_bytes`. This expands those bytes into:
* `tx_bytes` - A binary of a transaction encoded by RLP.
* `inputs` - The outputs that the transaction is acting on, and changes state e.g marked as "spent"
* `outputs` - The newly created outputs
More information is contained in the `tx_bytes`. However, to keep the Childchain _lean_, we extract
data onto the record as needed.
The schema contains the following fields:
- tx_bytes: The signed bytes submited by users
- tx_hash: The keccak hash of the transaction
- tx_type: The type of the transaction, this is an integer. ie: `1` for payment v1 transactions, `3` for fee transactions
- tx_index: index of the transaction in a block
Virtual fields used for convenience and validation:
- witnesses: Avoid decoding/parsing signatures mutiple times along validation process
- signed_tx: Avoid calling decode(tx_bytes) multiple times along the validation process
Note that with the current implementation, fields virtual fields are not populated when loading record from the DB
"""
use Ecto.Schema
alias __MODULE__.TransactionChangeset
alias Ecto.Multi
alias Engine.DB.Block
alias Engine.DB.Output
alias Engine.DB.TransactionFee
alias Engine.Fee
alias Engine.Repo
alias ExPlasma.Encoding
alias ExPlasma.Transaction, as: ExPlasmaTx
require Logger
@type tx_bytes :: binary
@type hex_tx_bytes :: list(binary)
@type batch :: {non_neg_integer, binary, list(ExPlasma.Transaction.t())}
@type t() :: %{
block: Block.t(),
block_id: pos_integer(),
tx_index: non_neg_integer(),
id: pos_integer(),
inputs: list(Output.t()),
inserted_at: DateTime.t(),
outputs: list(Output.t()),
signed_tx: ExPlasma.Transaction.t() | nil,
tx_bytes: binary(),
tx_hash: <<_::256>>,
tx_type: pos_integer(),
updated_at: DateTime.t(),
witnesses: binary()
}
@timestamps_opts [inserted_at: :node_inserted_at, updated_at: :node_updated_at]
schema "transactions" do
field(:tx_bytes, :binary)
field(:tx_hash, :binary)
field(:tx_type, :integer)
field(:tx_index, :integer)
# Virtual fields used for convenience and validation
# Avoid decoding/parsing signatures multiple times along the validation process
field(:witnesses, {:array, :string}, virtual: true)
# Avoid calling decode(tx_bytes) multiple times along the validation process
field(:signed_tx, :map, virtual: true)
belongs_to(:block, Block)
has_many(:inputs, Output, foreign_key: :spending_transaction_id)
has_many(:outputs, Output, foreign_key: :creating_transaction_id)
has_many(:fees, TransactionFee, foreign_key: :transaction_id)
field(:inserted_at, :utc_datetime)
field(:updated_at, :utc_datetime)
timestamps()
end
@doc """
Query a transaction by the given `field`.
Also preload given `preloads`
"""
def get_by(field, preloads) do
__MODULE__
|> Repo.get_by(field)
|> Repo.preload(preloads)
end
@spec encode_unsigned(t()) :: binary()
def encode_unsigned(transaction) do
{:ok, tx} = ExPlasma.decode(transaction.tx_bytes, signed: false)
ExPlasma.encode!(tx, signed: false)
end
@doc """
Inserts a new transaction and associates it with currently forming block.
If including a new transaction in forming block violates maximum number of transaction per block
then the transaction is associated with a newly inserted forming block.
"""
def insert(hex_tx_bytes) do
case decode(hex_tx_bytes) do
{:ok, data} ->
[data]
|> handle_transactions()
|> Repo.transaction()
|> case do
{:ok, result} ->
{:ok, Map.get(result, "transaction-1-of-1")}
{:error, _, changeset, _} ->
_ = Logger.error("Error when inserting transaction changeset #{inspect(changeset)}")
{:error, changeset}
error ->
_ = Logger.error("Error when inserting transaction #{inspect(error)}")
error
end
decode_error ->
_ = Logger.error("Error when inserting transaction decode_error #{inspect(decode_error)}")
decode_error
end
end
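# Hedged usage sketch; "0x..." stands in for real hex-encoded, RLP-encoded
# signed transaction bytes and is not a valid value by itself:
#
#     {:ok, %__MODULE__{tx_hash: tx_hash}} = insert("0x" <> signed_tx_hex)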
@doc """
Inserts a new batch of transactions and associates it with currently forming block.
If including a new transaction in forming block violates maximum number of transaction per block
then the transaction is associated with a newly inserted forming block.
"""
def insert_batch(txs_bytes) do
case decode_batch(txs_bytes) do
{:ok, batch} ->
batch
|> handle_transactions()
|> Repo.transaction()
|> case do
{:ok, _} = result ->
result
{:error, _, changeset, _} ->
_ = Logger.error("Error when inserting transaction changeset #{inspect(changeset)}")
{:error, changeset}
error ->
_ = Logger.error("Error when inserting transaction #{inspect(error)}")
error
end
decode_error ->
_ = Logger.error("Error when inserting transaction decode_error #{inspect(decode_error)}")
decode_error
end
end
@doc """
Inserts a fee transaction associated with a given block and transaction index
"""
def insert_fee_transaction(repo, currency_with_amount, block, fee_tx_index) do
currency_with_amount
|> TransactionChangeset.new_fee_transaction_changeset(block)
|> TransactionChangeset.set_blknum_and_tx_index(%{block: block, next_tx_index: fee_tx_index})
|> repo.insert()
end
defp handle_transactions(batch) do
all_fees = load_fees()
Enum.reduce(batch, Multi.new(), fn {index, tx_bytes, decoded}, multi ->
{:ok, fees} = load_fee(all_fees, decoded.tx_type)
changeset = TransactionChangeset.new_transaction_changeset(%__MODULE__{}, tx_bytes, decoded, fees)
block_with_next_tx_index = "block_with_next_tx_index-#{index}"
multi
|> Multi.run("current_forming_block-#{index}", fn repo, _ -> Block.get_forming_block_for_update(repo) end)
|> Multi.run(block_with_next_tx_index, fn repo, params ->
Block.get_block_and_tx_index_for_transaction(repo, params, index)
end)
|> Multi.insert("transaction-#{index}", fn %{^block_with_next_tx_index => block_with_next_tx_index} ->
TransactionChangeset.set_blknum_and_tx_index(changeset, block_with_next_tx_index)
end)
end)
end
@spec decode(tx_bytes()) :: {:ok, {<<_::48>>, binary(), ExPlasma.Transaction.t()}} | {:error, atom()}
defp decode(hex_tx_bytes) do
with {:ok, tx_bytes} <- Encoding.to_binary(hex_tx_bytes),
{:ok, decoded} <- ExPlasma.decode(tx_bytes),
{:ok, recovered} <- ExPlasmaTx.with_witnesses(decoded) do
{:ok, {"1-of-1", tx_bytes, recovered}}
end
end
@spec decode_batch(hex_tx_bytes()) :: {:ok, list(batch())} | {:error, atom()}
defp decode_batch(hexs_tx_bytes) do
acc = []
index = 0
decode_batch(hexs_tx_bytes, acc, index)
end
defp decode_batch([], acc, _) do
{:ok, Enum.reverse(acc)}
end
defp decode_batch([hex_tx_bytes | hexs_tx_bytes], acc, index) do
with {:ok, tx_bytes} <- Encoding.to_binary(hex_tx_bytes),
{:ok, decoded} <- ExPlasma.decode(tx_bytes),
{:ok, recovered} <- ExPlasmaTx.with_witnesses(decoded) do
decode_batch(hexs_tx_bytes, [{index, tx_bytes, recovered} | acc], index + 1)
end
end
defp load_fees() do
{:ok, all_fees} = Fee.accepted_fees()
all_fees
end
defp load_fee(all_fees, type) do
fees_for_type = Map.get(all_fees, type, {:error, :invalid_transaction_type})
{:ok, fees_for_type}
end
end
defmodule Aoc2021.Day4.BingoBoard do
@moduledoc """
A 5x5 bingo board containing numbers and marked positions.
"""
defmodule Position do
@moduledoc """
A position on the bingo board.
"""
@type row() :: non_neg_integer()
@type col() :: non_neg_integer()
@opaque t() :: {row(), col()}
@spec new(row(), col()) :: t()
def new(row, col), do: {row, col}
@spec row(t()) :: row()
def row({row, _}), do: row
@spec col(t()) :: col()
def col({_, col}), do: col
end
@opaque board() :: %{non_neg_integer() => Position.t()}
defstruct [:board, :marked]
@opaque t() :: %__MODULE__{
board: board(),
marked: MapSet.t(Position.t())
}
@spec new([[non_neg_integer()]]) :: t()
def new(numbers) do
board =
numbers
|> Enum.with_index()
|> Enum.reduce(%{}, &new_row/2)
%__MODULE__{board: board, marked: MapSet.new()}
end
@spec play(t(), non_neg_integer()) :: t()
def play(board, number) do
case Map.get(board.board, number) do
pos when is_tuple(pos) ->
%__MODULE__{board | marked: MapSet.put(board.marked, pos)}
nil ->
board
end
end
@spec win?(t()) :: boolean()
def win?(board) do
Enum.any?(
winning_positions(),
fn positions ->
MapSet.subset?(positions, board.marked)
end
)
end
@spec score(t(), non_neg_integer()) :: non_neg_integer()
def score(board, number) do
sum =
board.board
|> Map.to_list()
|> Enum.reject(fn {_, v} -> MapSet.member?(board.marked, v) end)
|> Enum.map(fn {v, _} -> v end)
|> Enum.sum()
sum * number
end
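# Worked example: if only 10 and 32 remain unmarked and the winning draw
# was 24, score/2 returns (10 + 32) * 24 = 1008.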
defp new_row({numbers, row}, acc) do
{acc, _} =
numbers
|> Enum.with_index()
|> Enum.reduce({acc, row}, &new_cell/2)
acc
end
defp new_cell({number, col}, {acc, row}) do
acc = Map.put(acc, number, Position.new(row, col))
{acc, row}
end
def winning_positions, do: winning_rows() ++ winning_cols()
def winning_rows do
for row <- 0..4 do
MapSet.new(
for col <- 0..4 do
Position.new(row, col)
end
)
end
end
def winning_cols do
for col <- 0..4 do
MapSet.new(
for row <- 0..4 do
Position.new(row, col)
end
)
end
end
end
defmodule Day7.Part1 do
use Bitwise
def start(input \\ "input.txt") do
inputs = File.read!(input)
|> String.split(~r{\n})
|> Enum.filter(&(&1 != ""))
|> Enum.map(&convert_signal/1)
results = inputs
|> process(%{})
IO.puts "Result A: #{results["a"]}"
b_index = inputs
|> Enum.find_index(&(&1.target == "b"))
b_signal = inputs
|> Enum.find(&(&1.target == "b"))
b_signal = %{b_signal | source: results["a"]}
IO.puts "New B Signal: "
IO.inspect b_signal
second_results = List.replace_at(inputs, b_index, b_signal)
|> process(%{})
IO.puts "Result A: #{second_results["a"]}"
end
def convert_signal(line) do
line
|> String.split(" ")
|> Enum.map(fn(val) ->
case Integer.parse(val) do
:error -> val
{int, _} -> int
end
end)
|> parse_line
end
def parse_line([line_input,"->",target]) when is_number(line_input) do
%{op: :input, source: line_input, target: target}
end
def parse_line([line_input,"->",target]) when is_binary(line_input) do
%{op: :line, source: line_input, target: target}
end
def parse_line(["NOT",source,"->",target]) do
%{op: :not, source: source, target: target}
end
def parse_line([source,"LSHIFT",quantity,"->",target]) do
%{op: :lshift, source: source, quantity: quantity, target: target}
end
def parse_line([source,"RSHIFT",quantity,"->",target]) do
%{op: :rshift, source: source, quantity: quantity, target: target}
end
def parse_line([source1,"AND",source2,"->",target]) do
%{op: :and, source: [source1,source2], target: target}
end
def parse_line([source1,"OR",source2,"->",target]) do
%{op: :or, source: [source1,source2], target: target}
end
def process([], acc), do: acc
def process([%{ source: source} = signal|rest], acc) when is_number(source) do
results = Map.put(acc, signal.target, source)
process(rest, results)
end
def process([%{ source: source } = signal|rest], acc) when is_binary(source) do
case Map.fetch(acc, source) do
{:ok, value} ->
result = calculate(value, signal)
results = Map.put(acc, signal.target, result)
process(rest, results)
:error ->
signals = List.insert_at(rest, Enum.count(rest), signal)
process(signals, acc)
end
end
def process([%{ source: source} = signal|rest], acc) when is_list(source) do
fetched = Enum.map(source, fn(s) ->
if is_number(s) do
s
else
case Map.fetch(acc, s) do
{:ok, value} -> value
:error -> nil
end
end
end)
if Enum.any?(fetched, &(&1 == nil)) do
signals = List.insert_at(rest, Enum.count(rest), signal)
process(signals, acc)
else
result = calculate(fetched, signal)
results = Map.put(acc, signal.target, result)
process(rest, results)
end
end
def calculate(value, op) do
result = _do_calc(value, op)
band(result,0xFFFF)
end
defp _do_calc(value, %{op: :line}), do: value
defp _do_calc(value, %{op: :not}), do: bnot(value)
defp _do_calc([value1, value2], %{op: :or}), do: bor(value1,value2)
defp _do_calc([value1, value2], %{op: :and}), do: band(value1,value2)
defp _do_calc(value, %{op: :lshift, quantity: quantity}), do: bsl(value,quantity)
defp _do_calc(value, %{op: :rshift, quantity: quantity}), do: bsr(value,quantity)
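# Worked examples: calculate(3, %{op: :lshift, quantity: 2}) = band(12, 0xFFFF) = 12,
# and calculate(1, %{op: :not}) = band(-2, 0xFFFF) = 65534.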
end
defmodule Agent do
@moduledoc """
Agents are a simple abstraction around state.
Often in Elixir there is a need to share or store state that
must be accessed from different processes or by the same process
at different points in time.
The Agent module provides a basic server implementation that
allows state to be retrieved and updated via a simple API.
## Examples
For example, in the Mix tool that ships with Elixir, we need
to keep a set of all tasks executed by a given project. Since
this set is shared, we can implement it with an Agent:
defmodule Mix.TasksServer do
def start_link do
Agent.start_link(fn -> MapSet.new end, name: __MODULE__)
end
@doc "Checks if the task has already executed"
def executed?(task, project) do
item = {task, project}
Agent.get(__MODULE__, fn set ->
item in set
end)
end
@doc "Marks a task as executed"
def put_task(task, project) do
item = {task, project}
Agent.update(__MODULE__, &MapSet.put(&1, item))
end
@doc "Resets the executed tasks and returns the previous list of tasks"
def take_all() do
Agent.get_and_update(__MODULE__, fn set ->
{Enum.into(set, []), MapSet.new}
end)
end
end
Note that agents still provide a segregation between the
client and server APIs, as seen in GenServers. In particular,
all code inside the function passed to the agent is executed
by the agent. This distinction is important because you may
want to avoid expensive operations inside the agent, as it will
effectively block the agent until the request is fulfilled.
Consider these two examples:
# Compute in the agent/server
def get_something(agent) do
Agent.get(agent, fn state -> do_something_expensive(state) end)
end
# Compute in the agent/client
def get_something(agent) do
Agent.get(agent, &(&1)) |> do_something_expensive()
end
The first function blocks the agent. The second function copies
all the state to the client and then executes the operation in the
client. The difference is whether the data is large enough to require
processing in the server, at least initially, or small enough to be
sent to the client cheaply.
## Name Registration
An Agent is bound to the same name registration rules as GenServers.
Read more about it in the `GenServer` docs.
## A word on distributed agents
It is important to consider the limitations of distributed agents. Agents
provide two APIs, one that works with anonymous functions and another
that expects an explicit module, function, and arguments.
In a distributed setup with multiple nodes, the API that accepts anonymous
functions only works if the caller (client) and the agent have the same
version of the caller module.
Keep in mind this issue also shows up when performing "rolling upgrades"
with agents. By rolling upgrades we mean the following situation: you wish
to deploy a new version of your software by *shutting down* some of your
nodes and replacing them with nodes running a new version of the software.
In this setup, part of your environment will have one version of a given
module and the other part another version (the newer one) of the same module.
The best solution is to simply use the explicit module, function, and arguments
APIs when working with distributed agents.
## Hot code swapping
An agent can have its code hot swapped live by simply passing a module,
function, and args tuple to the update instruction. For example, imagine
you have an agent named `:sample` and you want to convert its inner state
from some dict structure to a map. It can be done with the following
instruction:
{:update, :sample, {:advanced, {Enum, :into, [%{}]}}}
The agent's state will be added to the given list as the first argument.
"""
@typedoc "Return values of `start*` functions"
@type on_start :: {:ok, pid} | {:error, {:already_started, pid} | term}
@typedoc "The agent name"
@type name :: atom | {:global, term} | {:via, module, term}
@typedoc "The agent reference"
@type agent :: pid | {atom, node} | name
@typedoc "The agent state"
@type state :: term
@doc """
Starts an agent linked to the current process with the given function.
This is often used to start the agent as part of a supervision tree.
Once the agent is spawned, the given function is invoked and its return
value is used as the agent state. Note that `start_link` does not return
until the given function has returned.
## Options
The `:name` option is used for registration as described in the module
documentation.
If the `:timeout` option is present, the agent is allowed to spend at most
the given number of milliseconds on initialization or it will be terminated
and the start function will return `{:error, :timeout}`.
If the `:debug` option is present, the corresponding function in the
[`:sys` module](http://www.erlang.org/doc/man/sys.html) will be invoked.
If the `:spawn_opt` option is present, its value will be passed as options
to the underlying process as in `Process.spawn/4`.
## Return values
If the server is successfully created and initialized, the function returns
`{:ok, pid}`, where `pid` is the pid of the server. If an agent with the
specified name already exists, the function returns
`{:error, {:already_started, pid}}` with the pid of that process.
If the given function callback fails with `reason`, the function returns
`{:error, reason}`.
"""
@spec start_link((() -> term), GenServer.options) :: on_start
def start_link(fun, options \\ []) when is_function(fun, 0) do
GenServer.start_link(Agent.Server, fun, options)
end
@doc """
Starts an agent linked to the current process with the given module
function and arguments.
Same as `start_link/2` but a module, function and args are expected
instead of an anonymous function.
"""
@spec start_link(module, atom, [any], GenServer.options) :: on_start
def start_link(module, fun, args, options \\ []) do
GenServer.start_link(Agent.Server, {module, fun, args}, options)
end
@doc """
Starts an agent process without links (outside of a supervision tree).
See `start_link/2` for more information.
"""
@spec start((() -> term), GenServer.options) :: on_start
def start(fun, options \\ []) when is_function(fun, 0) do
GenServer.start(Agent.Server, fun, options)
end
@doc """
Starts an agent with the given module function and arguments.
Similar to `start/2` but a module, function and args are expected
instead of an anonymous function.
"""
@spec start(module, atom, [any], GenServer.options) :: on_start
def start(module, fun, args, options \\ []) do
GenServer.start(Agent.Server, {module, fun, args}, options)
end
@doc """
Gets an agent value via the given function.
The function `fun` is sent to the `agent` which invokes the function
passing the agent state. The result of the function invocation is
returned.
A timeout can also be specified (it has a default value of 5000).
"""
@spec get(agent, (state -> a), timeout) :: a when a: var
def get(agent, fun, timeout \\ 5000) when is_function(fun, 1) do
GenServer.call(agent, {:get, fun}, timeout)
end
@doc """
Gets an agent value via the given function.
Same as `get/3` but a module, function and args are expected
instead of an anonymous function. The state is added as first
argument to the given list of args.
"""
@spec get(agent, module, atom, [term], timeout) :: any
def get(agent, module, fun, args, timeout \\ 5000) do
GenServer.call(agent, {:get, {module, fun, args}}, timeout)
end
@doc """
Gets and updates the agent state in one operation.
The function `fun` is sent to the `agent` which invokes the function
passing the agent state. The function must return a tuple with two
elements, the first being the value to return (i.e. the `get` value)
and the second one is the new state.
A timeout can also be specified (it has a default value of 5000).
"""
@spec get_and_update(agent, (state -> {a, state}), timeout) :: a when a: var
def get_and_update(agent, fun, timeout \\ 5000) when is_function(fun, 1) do
GenServer.call(agent, {:get_and_update, fun}, timeout)
end
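# For example, popping the head off a list-backed agent (sketch):
#
#     Agent.get_and_update(agent, fn [head | tail] -> {head, tail} end)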
@doc """
Gets and updates the agent state in one operation.
Same as `get_and_update/3` but a module, function and args are expected
instead of an anonymous function. The state is added as first
argument to the given list of args.
"""
@spec get_and_update(agent, module, atom, [term], timeout) :: any
def get_and_update(agent, module, fun, args, timeout \\ 5000) do
GenServer.call(agent, {:get_and_update, {module, fun, args}}, timeout)
end
@doc """
Updates the agent state.
The function `fun` is sent to the `agent` which invokes the function
passing the agent state. The function must return the new state.
A timeout can also be specified (it has a default value of 5000).
This function always returns `:ok`.
"""
@spec update(agent, (state -> state), timeout) :: :ok
def update(agent, fun, timeout \\ 5000) when is_function(fun, 1) do
GenServer.call(agent, {:update, fun}, timeout)
end
@doc """
Updates the agent state.
Same as `update/3` but a module, function and args are expected
instead of an anonymous function. The state is added as first
argument to the given list of args.
"""
@spec update(agent, module, atom, [term], timeout) :: :ok
def update(agent, module, fun, args, timeout \\ 5000) do
GenServer.call(agent, {:update, {module, fun, args}}, timeout)
end
@doc """
Performs a cast (fire and forget) operation on the agent state.
The function `fun` is sent to the `agent` which invokes the function
passing the agent state. The function must return the new state.
Note that `cast` returns `:ok` immediately, regardless of whether the
destination node or agent exists.
"""
@spec cast(agent, (state -> state)) :: :ok
def cast(agent, fun) when is_function(fun, 1) do
GenServer.cast(agent, {:cast, fun})
end
@doc """
Performs a cast (fire and forget) operation on the agent state.
Same as `cast/2` but a module, function and args are expected
instead of an anonymous function. The state is added as first
argument to the given list of args.
"""
@spec cast(agent, module, atom, [term]) :: :ok
def cast(agent, module, fun, args) do
GenServer.cast(agent, {:cast, {module, fun, args}})
end
@doc """
Stops the agent.
Returns `:ok` if the agent is stopped within the given `timeout`.
"""
@spec stop(agent, timeout) :: :ok
def stop(agent, timeout \\ 5000) do
GenServer.call(agent, :stop, timeout)
end
end
defmodule Commanded.Scheduler.Jobs do
@moduledoc false
use GenServer
import Ex2ms
alias Commanded.Scheduler.{Jobs, JobSupervisor, OneOffJob, RecurringJob}
defstruct [:schedule_table, :jobs_table]
def start_link(args) do
GenServer.start_link(__MODULE__, args, name: __MODULE__)
end
@doc """
Schedule a named one-off job using the given module, function, args to run at
the specified date/time.
"""
@spec schedule_once(any, atom, [any], NaiveDateTime.t() | DateTime.t()) :: :ok
def schedule_once(name, module, args, run_at)
when is_atom(module) do
GenServer.call(__MODULE__, {:schedule_once, name, module, args, run_at})
end
@doc """
Schedule a named recurring job using the given module, function, args to run
repeatedly on the given schedule.
"""
@spec schedule_recurring(any, atom, [any], String.t()) :: :ok
def schedule_recurring(name, module, args, schedule)
when is_atom(module) and is_bitstring(schedule) do
GenServer.call(__MODULE__, {:schedule_recurring, name, module, args, schedule})
end
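# Hedged usage sketch; Reminder and Digest are hypothetical job modules, and
# the schedule string is assumed to be a cron expression:
#
#     Jobs.schedule_once("reminder", Reminder, [user_id], ~N[2020-01-01 12:00:00])
#     Jobs.schedule_recurring("digest", Digest, [], "0 9 * * *")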
@doc """
Cancel a named scheduled job.
"""
@spec cancel(any) :: :ok | {:error, reason :: any}
def cancel(name) do
GenServer.call(__MODULE__, {:cancel, name})
end
@doc """
Get all scheduled jobs.
"""
def scheduled_jobs do
GenServer.call(__MODULE__, :scheduled_jobs)
end
@doc """
Get pending jobs due at the given date/time (in UTC).
"""
def pending_jobs(now) do
GenServer.call(__MODULE__, {:pending_jobs, now})
end
@doc """
Get all currently running jobs.
"""
def running_jobs do
GenServer.call(__MODULE__, :running_jobs)
end
def run_jobs(now) do
GenServer.call(__MODULE__, {:run_jobs, now})
end
def init(_) do
state = %Jobs{
schedule_table: :ets.new(:schedule_table, [:set, :private]),
jobs_table: :ets.new(:jobs_table, [:set, :private])
}
schedule_job_run()
{:ok, state}
end
def handle_call({:schedule_once, name, module, args, run_at}, _from, state) do
reply =
schedule_job(
name,
%OneOffJob{name: name, module: module, args: args, run_at: run_at},
epoch_seconds(run_at),
state
)
{:reply, reply, state}
end
def handle_call({:schedule_recurring, name, module, args, schedule}, _from, state) do
reply =
schedule_job(
name,
%RecurringJob{name: name, module: module, args: args, schedule: schedule},
nil,
state
)
{:reply, reply, state}
end
def handle_call({:cancel, name}, _from, state) do
reply = remove_job(name, state)
{:reply, reply, state}
end
def handle_call(:scheduled_jobs, _from, %Jobs{schedule_table: schedule_table} = state) do
reply =
schedule_table
|> :ets.tab2list()
|> Enum.map(fn {_name, _due_at, _status, job} -> job end)
{:reply, reply, state}
end
def handle_call({:pending_jobs, now}, _from, state) do
{:reply, pending_jobs(now, state), state}
end
def handle_call(:running_jobs, _from, state) do
{:reply, running_jobs(state), state}
end
def handle_call({:run_jobs, now}, _from, state) do
execute_pending_jobs(now, state)
{:reply, :ok, state}
end
def handle_info(:run_jobs, state) do
utc_now() |> execute_pending_jobs(state)
schedule_job_run()
{:noreply, state}
end
def handle_info({:DOWN, ref, :process, _object, _reason}, state) do
{:noreply, remove_completed_job(ref, state)}
end
defp schedule_job(name, job, run_at, %Jobs{schedule_table: schedule_table} = state) do
case job_exists?(name, state) do
false ->
:ets.insert(schedule_table, {name, run_at, :pending, job})
:ok
true ->
{:error, :already_scheduled}
end
end
defp remove_job(name, %Jobs{schedule_table: schedule_table} = state) do
case job_exists?(name, state) do
true ->
:ets.delete(schedule_table, name)
:ok
false ->
{:error, :not_scheduled}
end
end
defp job_exists?(name, %Jobs{schedule_table: schedule_table}) do
case :ets.lookup(schedule_table, name) do
[_job] -> true
[] -> false
end
end
defp pending_jobs(now, %Jobs{schedule_table: schedule_table}) do
due_at_epoch = epoch_seconds(now)
predicate =
fun do
{_name, due_at, status, job} when due_at <= ^due_at_epoch and status == :pending -> job
end
:ets.select(schedule_table, predicate)
end
defp running_jobs(%Jobs{schedule_table: schedule_table}) do
predicate =
fun do
{_name, _due_at, status, job} when status == :running -> job
end
:ets.select(schedule_table, predicate)
end
defp execute_pending_jobs(now, state) do
for job <- pending_jobs(now, state) do
execute_job(job, state)
end
end
defp execute_job(%OneOffJob{name: name, module: module, args: args}, %Jobs{
jobs_table: jobs_table,
schedule_table: schedule_table
}) do
with {:ok, pid} <- JobSupervisor.start_job(name, module, args) do
ref = Process.monitor(pid)
:ets.update_element(schedule_table, name, {3, :running})
:ets.insert(jobs_table, {ref, name})
end
end
defp execute_job(%RecurringJob{name: name, module: module, args: args}, %Jobs{}) do
{:ok, _pid} = JobSupervisor.start_job(name, module, args)
end
defp remove_completed_job(
ref,
%Jobs{jobs_table: jobs_table, schedule_table: schedule_table} = state
) do
case :ets.lookup(jobs_table, ref) do
[{ref, name}] ->
:ets.delete(jobs_table, ref)
:ets.delete(schedule_table, name)
state
_ ->
state
end
end
defp schedule_job_run, do: Process.send_after(self(), :run_jobs, schedule_interval())
defp schedule_interval,
do: Application.get_env(:commanded_scheduler, :schedule_interval, 60_000)
defp epoch_seconds(%DateTime{} = due_at),
do: DateTime.diff(due_at, DateTime.from_unix!(0), :second)
defp epoch_seconds(%NaiveDateTime{} = due_at),
do: NaiveDateTime.diff(due_at, ~N[1970-01-01 00:00:00], :second)
defp utc_now, do: NaiveDateTime.utc_now()
end
defmodule CodeSigning do
@moduledoc """
Code signing and verification functions for BEAM binaries using Ed25519 signatures.
All strings for paths need to be passed as charlists for Erlang compatibility.
"""
@typedoc """
The filename as a string or the BEAM module binary.
"""
@type beam :: charlist() | binary()
@typedoc """
A tuple of the chunk ID and its binary data.
"""
@type chunkdata :: {charlist(), binary()}
@doc """
Signs the the given BEAM binary or path to `.beam` file using the Ed25519 `secret_key`.
Returns the modified binary that can be written to a file.
When given a BEAM binary, it will sign the binary.
When given a path to a `.beam` file, it will sign the binary without modifying the original file.
"""
@spec sign(beam(), Ed25519.key()) :: binary
def sign(module_or_path, secret_key) do
chunks = module_chunks(module_or_path)
chunks
|> sign_bytecode(secret_key)
|> (&write_signature_attribute(chunks, &1)).()
|> build_module
end
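# Hedged usage sketch: Ed25519.generate_key_pair/0 is assumed to come from
# the ed25519 package and to return {secret, public}:
#
#     {secret, public} = Ed25519.generate_key_pair()
#     signed = CodeSigning.sign('Elixir.MyModule.beam', secret)
#     true = CodeSigning.valid_signature?(signed, public)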
@doc """
Verifies the signature of the given BEAM binary or path to `.beam` file using the
Ed25519 `public_key`.
When given a BEAM binary, it will verify the signature of the binary.
When given a path to a `.beam` file, it will verify the signature of the binary without
modifying the original file.
"""
@spec valid_signature?(beam(), Ed25519.key()) :: boolean
def valid_signature?(module_or_path, public_key) do
chunks = module_chunks(module_or_path)
code = chunks |> code_binary
chunks
|> read_signature_attribute
|> Ed25519.valid_signature?(code, public_key)
end
@doc """
Verifies the signature of the given BEAM binary using the Ed25519 `public_key`. If the
signature is valid, the module will be loaded.
"""
@spec load(atom(), binary(), Ed25519.key()) :: :ok | :error
def load(module, binary, public_key) do
case valid_signature?(binary, public_key) do
true ->
:code.load_binary(module, nil, binary)
:ok
_ ->
:error
end
end
@doc """
Verifies the signature of the given path to `.beam` file using the
Ed25519 `public_key`. If the signature is valid, the module will be loaded.
Module names should be atoms prefixed with Elixir, such as `String.to_atom("Elixir.MyModule")`
"""
@spec load_file(atom(), charlist(), Ed25519.key()) :: :ok | :error
def load_file(module, beam_path, public_key) do
case valid_signature?(beam_path, public_key) do
true ->
{:ok, binary, _} = :erl_prim_loader.get_file(beam_path)
:code.load_binary(module, beam_path, binary)
:ok
_ ->
:error
end
end
@spec sign_bytecode([chunkdata()], Ed25519.key()) :: Ed25519.signature()
defp sign_bytecode(chunks, secret_key) do
chunks |> code_binary |> Ed25519.signature(secret_key)
end
@spec read_signature_attribute([chunkdata()]) :: Ed25519.signature()
defp read_signature_attribute(chunks) do
case :lists.keyfind('Attr', 1, chunks) do
{'Attr', attributes} ->
case :erlang.binary_to_term(attributes) |> Keyword.get(:signature) do
nil -> nil
signature -> signature |> hd
end
_ ->
nil
end
end
@spec write_signature_attribute([chunkdata()], Ed25519.signature()) :: [chunkdata()]
defp write_signature_attribute(chunks, signature) do
case :lists.keyfind('Attr', 1, chunks) do
{'Attr', attributes} ->
attribute_list = [signature: [signature]] ++ :erlang.binary_to_term(attributes)
:lists.keyreplace('Attr', 1, chunks, {'Attr', :erlang.term_to_binary(attribute_list)})
_ ->
attribute_list = [signature: [signature]]
:lists.append(chunks, [{'Attr', :erlang.term_to_binary(attribute_list)}])
end
end
@spec code_binary([chunkdata()]) :: binary
defp code_binary(chunks) do
with {'Code', code} <- :lists.keyfind('Code', 1, chunks) do
code
end
end
@spec module_chunks(beam()) :: [chunkdata()]
defp module_chunks(module_or_path) do
with {:ok, _, chunks} <- :beam_lib.all_chunks(module_or_path) do
chunks
end
end
@spec build_module([chunkdata()]) :: binary
defp build_module(chunks) do
with {:ok, binary} <- :beam_lib.build_module(chunks) do
binary
end
end
end
defmodule Club.ColorParser do
@moduledoc """
Wikipedia color names/hex values parser.
All the colors data itself has [Wikipedia copyrights](https://en.wikipedia.org/wiki/Wikipedia:Copyrights)
and distributed under the [Creative Commons Attribution-ShareAlike 3.0 Unported License](https://en.wikipedia.org/wiki/Wikipedia:Text_of_Creative_Commons_Attribution-ShareAlike_3.0_Unported_License).
"""
@urls [
"https://en.wikipedia.org/wiki/List_of_colors:_A%E2%80%93F",
"https://en.wikipedia.org/wiki/List_of_colors:_G%E2%80%93M",
"https://en.wikipedia.org/wiki/List_of_colors:_N%E2%80%93Z"
]
@filename "colors.json"
def parse_and_save(filename \\ @filename) do
parse()
|> Jason.encode!()
|> Jason.Formatter.pretty_print()
|> save(filename)
end
def parse do
[]
|> get_colors_from_urls(@urls)
|> Enum.map(fn
{color, hex} ->
%{uuid: UUID.uuid4(), name: color, hex: String.trim(hex, "#")}
end)
|> Enum.reverse()
end
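# Each parsed entry has the shape
# %{uuid: "<random v4 UUID>", name: "Absolute Zero", hex: "0048BA"}
# (the leading "#" is trimmed from the hex value).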
defp save(colors_text, filename) do
File.write!(filename, colors_text)
end
defp get_colors_from_urls(parsed_colors, [url | rest]) do
charurl = String.to_charlist(url)
{:ok, {_, _, body}} = :httpc.request(charurl)
rows =
body
|> List.to_string()
|> Floki.find("#mw-content-text > div.mw-parser-output > table > tbody > tr")
parsed_colors
|> parse_rows(rows)
|> get_colors_from_urls(rest)
end
defp get_colors_from_urls(parsed_colors, []), do: parsed_colors
defp parse_rows(parsed_colors, [
{"tr", _,
[
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _},
{"th", _, _}
]}
| rest
]),
do: parse_rows(parsed_colors, rest)
defp parse_rows(parsed_colors, [{"tr", _, children} | rest]) do
[parse_row(children) | parsed_colors]
|> parse_rows(rest)
end
defp parse_rows(parsed_colors, []), do: parsed_colors
defp parse_row([color, hex, _, _, _, _, _, _, _, _]),
do: {Floki.text(color), Floki.text(hex) |> String.trim()}
end
defmodule Utils.Types.WalletAddress do
@moduledoc """
A custom Ecto type that handles wallet addresses. A wallet address is a string
that consists of 4 case-insensitive letters followed by a 12-digit integer.
All non-alphanumerics are stripped and ignored.
Although this custom type is a straight-forward string primitive, it validates
the given wallet address before allowing the value to be cast. Hence it gives
a better assurance that the value stored by this type follows a consistent format.
This module also provides a helper macro `wallet_address/1` for setting up
a schema field that autogenerates the wallet address.
"""
@behaviour Ecto.Type
alias Ecto.Schema
alias Utils.Helpers.UUID
# 4 letters followed by 12 digits: a 16-byte binary
@type t :: <<_::128>>
# The letters used to randomize the wallet address
@alphabets "abcdefghijklmnopqrstuvwxyz"
# The digits used to randomize the wallet address
@numbers "0123456789"
# The defaults to use to define the field.
@default_opts [
# The string to use as the 4-letter prefix at the beginning of the address.
prefix: nil,
# The function to use for autogenerating the value.
autogenerate: nil
]
@doc """
Returns the underlying Ecto primitive type.
"""
def type, do: :string
@doc """
Casts the given input to the schema struct.
Returns `{:ok, value}` on successful casting, where `value` is either a UUID
(for backward compatibility) or a 16-character wallet address. Returns `:error` on failure.
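## Examples

    iex> Utils.Types.WalletAddress.cast("ABCD-1234-5678-9012")
    {:ok, "abcd123456789012"}
    iex> Utils.Types.WalletAddress.cast("too-short")
    :error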
"""
@spec cast(String.t()) :: {:ok, String.t()} | :error
def cast(address) do
# We still want to support the old UUID format.
case UUID.valid?(address) do
true ->
{:ok, address}
_ ->
address =
address
|> String.replace(~r/[^A-Za-z0-9]/, "")
|> String.downcase()
case String.match?(address, ~r/^[a-z0-9]{4}[0-9]{12}$/) do
true -> {:ok, address}
_ -> :error
end
end
end
@doc """
Transforms the value after loaded from the database.
"""
@spec load(String.t()) :: {:ok, String.t()}
def load(value), do: {:ok, value}
@doc """
Prepares the value for saving to database.
"""
@spec dump(String.t()) :: {:ok, String.t()}
def dump(value), do: {:ok, value}
@doc """
Defines a wallet address field on a schema.
## Example
defmodule WalletSchema do
use Utils.Types.WalletAddress
schema "wallet" do
wallet_address(:address)
end
end
"""
defmacro wallet_address(field_name, opts \\ []) do
opts = Keyword.merge(@default_opts, opts)
type = __MODULE__
quote bind_quoted: binding() do
autogen_fn = opts[:autogenerate] || {type, :autogenerate, [opts[:prefix]]}
Schema.field(field_name, type, [])
Module.put_attribute(__MODULE__, :ecto_autogenerate, {[field_name], autogen_fn})
end
end
@doc """
Generates a new wallet address with the format `aaaa000000000000`,
where `a` is a random a-z letter and `0` is a random digit (0-9).
Returns `{:ok, address}`.
"""
@spec generate() :: {:ok, String.t()}
def generate do
prefix = random(4, @alphabets)
generate(prefix)
end
@doc """
Generates a new wallet address. Accepts a prefix of up to 4 lowercase
letters or digits, uses it as the address's prefix, and randomizes the rest with digits.
Returns `{:ok, address}` on success.
Returns `:error` if the prefix is longer than 4 characters or contains invalid characters.
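## Examples

    {:ok, address} = Utils.Types.WalletAddress.generate("wllt")
    # `address` is "wllt" followed by 12 random digits, e.g. "wllt042917385620"
    :error = Utils.Types.WalletAddress.generate("TOOLONG")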
"""
@spec generate(String.t() | nil) :: {:ok, String.t()} | :error
def generate(prefix) when byte_size(prefix) <= 4 do
case String.match?(prefix, ~r/^[a-z0-9]*$/) do
true ->
random_length = 16 - String.length(prefix)
{:ok, prefix <> random(random_length, @numbers)}
false ->
:error
end
end
def generate(nil), do: generate()
def generate(_), do: :error
defp random(output_length, pool) when is_binary(pool) do
random(output_length, String.split(pool, "", trim: true))
end
defp random(output_length, pool) do
1..output_length
|> Enum.reduce([], fn _, acc -> [Enum.random(pool) | acc] end)
|> Enum.join("")
end
# Callback invoked by autogenerate fields.
@doc false
def autogenerate(prefix) do
{:ok, address} = generate(prefix)
address
end
defmacro __using__(_) do
quote do
import Utils.Types.WalletAddress, only: [wallet_address: 1, wallet_address: 2]
end
end
end
defmodule Protobuf do
@moduledoc """
`protoc` should always be used to generate code instead of writing the code by hand.
By `use`-ing this module, the macros defined in `Protobuf.DSL` are injected. Most of these
macros correspond to definitions in .proto files.
defmodule Foo do
use Protobuf, syntax: :proto3
defstruct [:a, :b]
field :a, 1, type: :int32
field :b, 2, type: :string
end
Your Protobuf message (module) is just a normal Elixir struct. Some useful functions are also injected;
see "Callbacks" for details. Examples:
foo1 = Foo.new!(%{a: 1})
foo1.b == ""
bin = Foo.encode(foo1)
foo1 == Foo.decode(bin)
Besides the functions in "Callbacks", some other functions may be defined:
* Extension functions, when your Protobuf message uses extensions. See `Protobuf.Extension` for details.
* `put_extension(struct, extension_mod, field, value)`
* `get_extension(struct, extension_mod, field, default \\ nil)`
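A hedged sketch of extension usage (the `FooExtension` module and its
`:bar` field are illustrative):

    foo = Foo.put_extension(foo, FooExtension, :bar, "value")
    "value" = Foo.get_extension(foo, FooExtension, :bar)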
"""
defmacro __using__(opts) do
quote location: :keep do
import Protobuf.DSL, only: [field: 3, field: 2, oneof: 2, extend: 4, extensions: 1]
Module.register_attribute(__MODULE__, :fields, accumulate: true)
Module.register_attribute(__MODULE__, :oneofs, accumulate: true)
Module.register_attribute(__MODULE__, :extends, accumulate: true)
Module.register_attribute(__MODULE__, :extensions, [])
@options unquote(opts)
@before_compile Protobuf.DSL
@behaviour Protobuf
def new() do
Protobuf.Builder.new(__MODULE__)
end
def new(attrs) do
Protobuf.Builder.new(__MODULE__, attrs)
end
def new!(attrs) do
Protobuf.Builder.new!(__MODULE__, attrs)
end
def transform_module() do
nil
end
defoverridable transform_module: 0
unquote(def_encode_decode())
end
end
defp def_encode_decode() do
quote do
def decode(data), do: Protobuf.Decoder.decode(data, __MODULE__)
def encode(struct), do: Protobuf.Encoder.encode(struct)
end
end
@doc """
Build a blank struct with default values. This and the other "new" functions are
preferred over building the struct directly, like `%Foo{}`.
In proto3, the zero values are the default values.
"""
@callback new() :: struct
@doc """
Build and update the struct with passed fields.
"""
@callback new(Enum.t()) :: struct
@doc """
Similar to `new/1`, but uses `struct!/2` to build the struct, so
errors will be raised if unknown keys are passed.
"""
@callback new!(Enum.t()) :: struct
@doc """
Encode the struct to a protobuf binary.
Errors may be raised if there's something wrong in the struct.
"""
@callback encode(struct) :: binary
@doc """
Decode a protobuf binary to a struct.
Errors may be raised if there's something wrong in the binary.
"""
@callback decode(binary) :: struct
@doc """
Returns `nil` or a transformer module that implements the `Protobuf.TransformModule`
behaviour.
This function is overridable in your module.
"""
@callback transform_module() :: module | nil
@doc """
It's preferable to use the message's `decode` function, like:
Foo.decode(bin)
"""
@spec decode(binary, module) :: struct
def decode(data, mod) do
Protobuf.Decoder.decode(data, mod)
end
@doc """
It's preferable to use the message's `encode` function, like:
Foo.encode(foo)
"""
@spec encode(struct) :: binary
def encode(struct) do
Protobuf.Encoder.encode(struct)
end
@doc """
Loads extensions modules.
This function should be called in your application's `start/2` callback,
as seen in the example below, if you wish to use extensions.
## Example
def start(_type, _args) do
Protobuf.load_extensions()
Supervisor.start_link([], strategy: :one_for_one)
end
"""
@spec load_extensions() :: :ok
def load_extensions() do
Protobuf.Extension.__cal_extensions__()
:ok
end
end
defmodule Mix.Tasks.Ecto.Gen.Erd do
@moduledoc """
A mix task to generate an ERD (Entity Relationship Diagram) in various formats.
Supported formats:
* [DOT](#module-dot)
* [PlantUML](#module-plantuml)
* [DBML](#module-dbml)
* [QuickDBD](#module-quickdbd)
Configuration examples and output for a couple of open-source projects can be found in the EXAMPLES group of the PAGES section.
## DOT
[DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) format is able to represent all available types of entities:
* schemas
* embedded schemas
* schemaless tables (automatically derived from many-to-many relations)
Clusters are supported and can be set in the `:map_node` option using `Ecto.ERD.Node.set_cluster/2`.
You should have [graphviz](https://graphviz.org/) installed in order to convert a `*.dot` file to an image.
```
$ mix ecto.gen.erd # generates ecto_erd.dot
$ mix ecto.gen.erd --output-path=ecto_erd.dot
$ mix ecto.gen.erd && dot -Tpng ecto_erd.dot -o erd.png && xdg-open erd.png
```
## PlantUML
[PlantUML](https://plantuml.com) matches DOT in the number of supported features.
You should have [plantuml](https://plantuml.com/download) installed in order to convert a `*.puml` file to an image.
```
$ mix ecto.gen.erd --output-path=erd.puml
$ mix ecto.gen.erd --output-path=erd.puml && plantuml erd.puml && xdg-open erd.png
```
*Tip: if the output image is cropped, you may need to adjust the image size with the `PLANTUML_LIMIT_SIZE` environment variable.*
## DBML
The [DBML](https://www.dbml.org/) format is more limited in the entity types it can display compared to DOT and PlantUML, as it is focused on tables only.
Multiple schemas that use the same table are merged into one table. Embedded schemas, obviously,
cannot be displayed at all.
`TableGroup`s are supported and can be set in the `:map_node` option using `Ecto.ERD.Node.set_cluster/2`.
This format is very handy if you use [dbdiagram.io](https://dbdiagram.io) or [dbdocs.io](https://dbdocs.io).
```
$ mix ecto.gen.erd --output-path=ecto_erd.dbml
```
## QuickDBD
A format that is used by [QuickDBD](https://www.quickdatabasediagrams.com) - a competitor of [dbdiagram.io](https://dbdiagram.io).
Like DBML, it is also focused on tables and cannot display embedded schemas. However, this format doesn't support clusters.
It doesn't have a reserved file extension, but we use `*.qdbd`.
```
$ mix ecto.gen.erd --output-path=ecto_erd.qdbd
```
## Command line options
* `--output-path` - the path to the output file, defaults to `ecto_erd.dot`. Supported file extensions: `dot`, `puml`, `dbml`, `qdbd`.
* `--config-path` - the path to the config file, defaults to `.ecto_erd.exs`.
## Configuration file
When the `mix ecto.gen.erd` task runs, it tries to read its configuration from the `.ecto_erd.exs` file in the current
working directory. The configuration file must return a keyword list.
### Options
* `:fontname` - the font name, defaults to `Roboto Mono`. Must be a monospaced font if the output format is `dot` and more than 1 column is displayed.
The option is only supported for `dot` and `puml` files.
* `:columns` - the list of columns to display for each node (schema/source). Set to `[]` to hide fields completely.
Available columns: `:name` and `:type`. Defaults to `[:name, :type]`. The option is only supported for `dot` and `puml` files.
* `:map_node` - a function that allows removing a node from the diagram or moving it to a cluster. Defaults to `Function.identity/1`,
which means that all nodes are displayed and none of them belongs to a cluster.
Use `Ecto.ERD.Node.set_cluster/2` in this function to set a cluster (not supported by [QuickDBD](#module-quickdbd)).
In order to remove a node, the function must return `nil`.
* `:otp_app` - the application that will be scanned, along with its dependent applications, in order to get a list of Ecto schemas.
Defaults to `Mix.Project.config()[:app]`. You need to configure this option only if you run the task from the umbrella root.
Default values can be represented as follows:
# .ecto_erd.exs
[
fontname: "Roboto Mono", # used only by dot and puml
columns: [:name, :type], # used only by dot and puml
map_node: &Function.identity/1,
otp_app: Mix.Project.config()[:app]
]
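A hedged sketch of a configuration that groups nodes into clusters by the
root namespace of each schema module (the pattern-matched `schema_module`
field follows the `Ecto.ERD.Node` examples in this library's docs; treat
it as illustrative):

    # .ecto_erd.exs
    [
      map_node: fn
        %{schema_module: nil} = node ->
          # schemaless tables derived from many-to-many relations
          node
        %{schema_module: schema_module} = node ->
          cluster = schema_module |> Module.split() |> hd()
          Ecto.ERD.Node.set_cluster(node, cluster)
      end
    ]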
"""
@shortdoc "Generate an ERD"
use Mix.Task
@requirements ["app.config"]
@impl true
def run(args) do
{cli_opts, _} =
OptionParser.parse!(args, strict: [output_path: :string, config_path: :string])
config_path = Keyword.get(cli_opts, :config_path, ".ecto_erd.exs")
file_opts =
if File.exists?(config_path) do
{file_opts, _} = Code.eval_file(config_path)
file_opts
else
[]
end
otp_app =
cond do
Keyword.has_key?(file_opts, :otp_app) -> file_opts[:otp_app]
not is_nil(Mix.Project.config()[:app]) -> Mix.Project.config()[:app]
true -> raise "Unable to detect `:otp_app`, please specify it explicitly"
end
output_path = cli_opts[:output_path] || file_opts[:output_path] || "ecto_erd.dot"
map_node_callback = file_opts[:map_node] || (&Function.identity/1)
schema_modules = Ecto.ERD.SchemaModules.scan(otp_app)
output =
case Path.extname(output_path) do
".dot" ->
fontname = file_opts[:fontname] || "Roboto Mono"
columns = file_opts[:columns] || [:name, :type]
schema_modules
|> Ecto.ERD.Graph.new([:associations, :embeds])
|> Ecto.ERD.Graph.map_nodes(map_node_callback)
|> Ecto.ERD.Graph.sort()
|> Ecto.ERD.Dot.render(fontname: fontname, columns: columns)
".puml" ->
fontname = file_opts[:fontname] || "Roboto Mono"
columns = file_opts[:columns] || [:name, :type]
schema_modules
|> Ecto.ERD.Graph.new([:associations, :embeds])
|> Ecto.ERD.Graph.map_nodes(map_node_callback)
|> Ecto.ERD.Graph.sort()
|> Ecto.ERD.PlantUML.render(fontname: fontname, columns: columns)
".dbml" ->
schema_modules
|> Ecto.ERD.Graph.new([:associations])
|> Ecto.ERD.Graph.map_nodes(map_node_callback)
|> Ecto.ERD.Graph.make_schemaless()
|> Ecto.ERD.Graph.sort()
|> Ecto.ERD.DBML.render()
".qdbd" ->
schema_modules
|> Ecto.ERD.Graph.new([:associations])
|> Ecto.ERD.Graph.map_nodes(map_node_callback)
|> Ecto.ERD.Graph.make_schemaless()
|> Ecto.ERD.Graph.sort()
|> Ecto.ERD.QuickDBD.render()
end
File.write!(output_path, output)
end
end
defmodule AWS.OpsWorks do
@moduledoc """
AWS OpsWorks
Welcome to the *AWS OpsWorks Stacks API Reference*. This guide provides
descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions
and data types, including common parameters and error codes.
AWS OpsWorks Stacks is an application management service that provides an
integrated experience for overseeing the complete application lifecycle.
For information about this product, go to the [AWS
OpsWorks](http://aws.amazon.com/opsworks/) details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks Stacks API is by using the AWS
Command Line Interface (CLI) or by using one of the AWS SDKs to implement
applications in your preferred language. For more information, see:
* [AWS CLI](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html)
* [AWS SDK for Java](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html)
* [AWS SDK for .NET](http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm)
* [AWS SDK for PHP 2](http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html)
* [AWS SDK for Ruby](http://docs.aws.amazon.com/sdkforruby/api/)
* [AWS SDK for Node.js](http://aws.amazon.com/documentation/sdkforjavascript/)
* [AWS SDK for Python (Boto)](http://docs.pythonboto.org/en/latest/ref/opsworks.html)

**Endpoints**
AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must
connect to one of the following endpoints. Stacks can only be accessed or
managed within the endpoint in which they are created.
* opsworks.us-east-1.amazonaws.com
* opsworks.us-east-2.amazonaws.com
* opsworks.us-west-1.amazonaws.com
* opsworks.us-west-2.amazonaws.com
* opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console)
* opsworks.eu-west-1.amazonaws.com
* opsworks.eu-west-2.amazonaws.com
* opsworks.eu-west-3.amazonaws.com
* opsworks.eu-central-1.amazonaws.com
* opsworks.ap-northeast-1.amazonaws.com
* opsworks.ap-northeast-2.amazonaws.com
* opsworks.ap-south-1.amazonaws.com
* opsworks.ap-southeast-1.amazonaws.com
* opsworks.ap-southeast-2.amazonaws.com
* opsworks.sa-east-1.amazonaws.com

**Chef Versions**
When you call `CreateStack`, `CloneStack`, or `UpdateStack` we recommend
you use the `ConfigurationManager` parameter to specify the Chef version.
The recommended and default value for Linux stacks is currently 12. Windows
stacks use Chef 12.2. For more information, see [Chef
Versions](http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html).
Note: you can specify Chef 12, 11.10, or 11.4 for your Linux stack. We
recommend migrating your existing Linux stacks to Chef 12 as soon as
possible.
"""
@doc """
Assigns a registered instance to a layer.

* You can assign registered on-premises instances to any layer type.
* You can assign registered Amazon EC2 instances only to custom layers.
* You cannot use this action with instances that were created with AWS OpsWorks Stacks.

**Required Permissions**: To use this action, an AWS Identity
and Access Management (IAM) user must have a Manage permissions level for
the stack or an attached policy that explicitly grants permissions. For
more information on user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def assign_instance(client, input, options \\ []) do
request(client, "AssignInstance", input, options)
end
@doc """
Assigns one of the stack's registered Amazon EBS volumes to a specified
instance. The volume must first be registered with the stack by calling
`RegisterVolume`. After you register the volume, you must call
`UpdateVolume` to specify a mount point before calling `AssignVolume`. For
more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def assign_volume(client, input, options \\ []) do
request(client, "AssignVolume", input, options)
end
@doc """
Associates one of the stack's registered Elastic IP addresses with a
specified instance. The address must first be registered with the stack by
calling `RegisterElasticIp`. For more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def associate_elastic_ip(client, input, options \\ []) do
request(client, "AssociateElasticIp", input, options)
end
@doc """
Attaches an Elastic Load Balancing load balancer to a specified layer. AWS
OpsWorks Stacks does not support Application Load Balancer. You can only
use Classic Load Balancer with AWS OpsWorks Stacks. For more information,
see [Elastic Load
Balancing](http://docs.aws.amazon.com/opsworks/latest/userguide/layers-elb.html).
Note: you must create the Elastic Load Balancing instance separately, by
using the Elastic Load Balancing console, API, or CLI. For more
information, see the [Elastic Load Balancing Developer
Guide](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html).

**Required Permissions**: To use this action, an IAM user must have
a Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def attach_elastic_load_balancer(client, input, options \\ []) do
request(client, "AttachElasticLoadBalancer", input, options)
end
@doc """
Creates a clone of a specified stack. For more information, see [Clone a
Stack](http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html).
By default, all parameters are set to the values used by the parent stack.
**Required Permissions**: To use this action, an IAM user must have an
attached policy that explicitly grants permissions. For more information on
user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def clone_stack(client, input, options \\ []) do
request(client, "CloneStack", input, options)
end
@doc """
Creates an app for a specified stack. For more information, see [Creating
Apps](http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_app(client, input, options \\ []) do
request(client, "CreateApp", input, options)
end
@doc """
Runs deployment or stack commands. For more information, see [Deploying
Apps](http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html)
and [Run Stack
Commands](http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html).
**Required Permissions**: To use this action, an IAM user must have a
Deploy or Manage permissions level for the stack, or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
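## Example

A hedged usage sketch (client construction follows aws-elixir conventions;
the `input` keys mirror the OpsWorks `CreateDeployment` API, and the IDs are
placeholders):

    client = %AWS.Client{access_key_id: "AKIA...",
                         secret_access_key: "secret",
                         region: "us-east-1",
                         endpoint: "amazonaws.com"}
    input = %{"StackId" => "stack-id", "AppId" => "app-id",
              "Command" => %{"Name" => "deploy"}}
    {:ok, result, _response} = AWS.OpsWorks.create_deployment(client, input)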
"""
def create_deployment(client, input, options \\ []) do
request(client, "CreateDeployment", input, options)
end
@doc """
Creates an instance in a specified stack. For more information, see [Adding
an Instance to a
Layer](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_instance(client, input, options \\ []) do
request(client, "CreateInstance", input, options)
end
@doc """
Creates a layer. For more information, see [How to Create a
Layer](http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-create.html).
Note: you should use **CreateLayer** for noncustom layer types such as PHP
App Server only if the stack does not have an existing layer of that type.
A stack can have at most one instance of each noncustom layer; if you
attempt to create a second instance, **CreateLayer** fails. A stack can
have an arbitrary number of custom layers, so you can call **CreateLayer**
as many times as you like for that layer type.

**Required Permissions**: To use this action, an IAM user must have
a Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_layer(client, input, options \\ []) do
request(client, "CreateLayer", input, options)
end
@doc """
Creates a new stack. For more information, see [Create a New
Stack](http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html).
**Required Permissions**: To use this action, an IAM user must have an
attached policy that explicitly grants permissions. For more information on
user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_stack(client, input, options \\ []) do
request(client, "CreateStack", input, options)
end
@doc """
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must have an
attached policy that explicitly grants permissions. For more information on
user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def create_user_profile(client, input, options \\ []) do
request(client, "CreateUserProfile", input, options)
end
@doc """
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_app(client, input, options \\ []) do
request(client, "DeleteApp", input, options)
end
@doc """
Deletes a specified instance, which terminates the associated Amazon EC2
instance. You must stop an instance before you can delete it.
For more information, see [Deleting
Instances](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_instance(client, input, options \\ []) do
request(client, "DeleteInstance", input, options)
end
@doc """
Deletes a specified layer. You must first stop and then delete all
associated instances or unassign registered instances. For more
information, see [How to Delete a
Layer](http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_layer(client, input, options \\ []) do
request(client, "DeleteLayer", input, options)
end
@doc """
Deletes a specified stack. You must first delete all instances, layers, and
apps or deregister registered instances. For more information, see [Shut
Down a
Stack](http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_stack(client, input, options \\ []) do
request(client, "DeleteStack", input, options)
end
@doc """
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must have an
attached policy that explicitly grants permissions. For more information on
user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def delete_user_profile(client, input, options \\ []) do
request(client, "DeleteUserProfile", input, options)
end
@doc """
Deregisters a specified Amazon ECS cluster from a stack. For more
information, see [ Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack or an attached policy that
explicitly grants permissions. For more information on user permissions,
see
[http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_ecs_cluster(client, input, options \\ []) do
request(client, "DeregisterEcsCluster", input, options)
end
@doc """
Deregisters a specified Elastic IP address. The address can then be
registered by another stack. For more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_elastic_ip(client, input, options \\ []) do
request(client, "DeregisterElasticIp", input, options)
end
@doc """
Deregisters a registered Amazon EC2 or on-premises instance. This action
removes the instance from the stack and returns it to your control. This
action cannot be used with instances that were created with AWS OpsWorks
Stacks.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_instance(client, input, options \\ []) do
request(client, "DeregisterInstance", input, options)
end
@doc """
Deregisters an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_rds_db_instance(client, input, options \\ []) do
request(client, "DeregisterRdsDbInstance", input, options)
end
@doc """
Deregisters an Amazon EBS volume. The volume can then be registered by
another stack. For more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def deregister_volume(client, input, options \\ []) do
request(client, "DeregisterVolume", input, options)
end
@doc """
Describes the available AWS OpsWorks Stacks agent versions. You must
specify a stack ID or a configuration manager. `DescribeAgentVersions`
returns a list of available agent versions for the specified stack or
configuration manager.
"""
def describe_agent_versions(client, input, options \\ []) do
request(client, "DescribeAgentVersions", input, options)
end
@doc """
Requests a description of a specified set of apps.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_apps(client, input, options \\ []) do
request(client, "DescribeApps", input, options)
end
@doc """
Describes the results of specified commands.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_commands(client, input, options \\ []) do
request(client, "DescribeCommands", input, options)
end
@doc """
Requests a description of a specified set of deployments.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_deployments(client, input, options \\ []) do
request(client, "DescribeDeployments", input, options)
end
@doc """
Describes Amazon ECS clusters that are registered with a stack. If you
specify only a stack ID, you can use the `MaxResults` and `NextToken`
parameters to paginate the response. However, AWS OpsWorks Stacks currently
supports only one cluster per layer, so the result set has a maximum of one
element.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack or an attached policy
that explicitly grants permission. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
This call accepts only one resource-identifying parameter.
"""
def describe_ecs_clusters(client, input, options \\ []) do
request(client, "DescribeEcsClusters", input, options)
end
@doc """
Describes [Elastic IP
addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_elastic_ips(client, input, options \\ []) do
request(client, "DescribeElasticIps", input, options)
end
@doc """
Describes a stack's Elastic Load Balancing instances.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_elastic_load_balancers(client, input, options \\ []) do
request(client, "DescribeElasticLoadBalancers", input, options)
end
@doc """
Requests a description of a set of instances.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_instances(client, input, options \\ []) do
request(client, "DescribeInstances", input, options)
end
@doc """
Requests a description of one or more layers in a specified stack.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_layers(client, input, options \\ []) do
request(client, "DescribeLayers", input, options)
end
@doc """
Describes load-based auto scaling configurations for specified layers.
Note: you must specify at least one of the parameters.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_load_based_auto_scaling(client, input, options \\ []) do
request(client, "DescribeLoadBasedAutoScaling", input, options)
end
@doc """
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must have
self-management enabled or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_my_user_profile(client, input, options \\ []) do
request(client, "DescribeMyUserProfile", input, options)
end
@doc """
Describes the operating systems that are supported by AWS OpsWorks Stacks.
"""
def describe_operating_systems(client, input, options \\ []) do
request(client, "DescribeOperatingSystems", input, options)
end
@doc """
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_permissions(client, input, options \\ []) do
request(client, "DescribePermissions", input, options)
end
@doc """
Describes an instance's RAID arrays.

Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_raid_arrays(client, input, options \\ []) do
request(client, "DescribeRaidArrays", input, options)
end
@doc """
Describes Amazon RDS instances.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
This call accepts only one resource-identifying parameter.
"""
def describe_rds_db_instances(client, input, options \\ []) do
request(client, "DescribeRdsDbInstances", input, options)
end
@doc """
Describes AWS OpsWorks Stacks service errors.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
This call accepts only one resource-identifying parameter.
"""
def describe_service_errors(client, input, options \\ []) do
request(client, "DescribeServiceErrors", input, options)
end
@doc """
Requests a description of a stack's provisioning parameters.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_stack_provisioning_parameters(client, input, options \\ []) do
request(client, "DescribeStackProvisioningParameters", input, options)
end
@doc """
Describes the number of layers and apps in a specified stack, and the
number of instances in each state, such as `running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_stack_summary(client, input, options \\ []) do
request(client, "DescribeStackSummary", input, options)
end
@doc """
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must have a Show,
Deploy, or Manage permissions level for the stack, or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_stacks(client, input, options \\ []) do
request(client, "DescribeStacks", input, options)
end
@doc """
Describes time-based auto scaling configurations for specified instances.
Note: you must specify at least one of the parameters.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_time_based_auto_scaling(client, input, options \\ []) do
request(client, "DescribeTimeBasedAutoScaling", input, options)
end
@doc """
Describes the specified users.
**Required Permissions**: To use this action, an IAM user must have an
attached policy that explicitly grants permissions. For more information on
user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_user_profiles(client, input, options \\ []) do
request(client, "DescribeUserProfiles", input, options)
end
@doc """
Describes an instance's Amazon EBS volumes.
Note: this call accepts only one resource-identifying parameter.

**Required Permissions**: To use this action, an IAM user must have
a Show, Deploy, or Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def describe_volumes(client, input, options \\ []) do
request(client, "DescribeVolumes", input, options)
end
@doc """
Detaches a specified Elastic Load Balancing instance from its layer.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def detach_elastic_load_balancer(client, input, options \\ []) do
request(client, "DetachElasticLoadBalancer", input, options)
end
@doc """
Disassociates an Elastic IP address from its instance. The address remains
registered with the stack. For more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def disassociate_elastic_ip(client, input, options \\ []) do
request(client, "DisassociateElasticIp", input, options)
end
@doc """
Gets a generated host name for the specified layer, based on the current
host name theme.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def get_hostname_suggestion(client, input, options \\ []) do
request(client, "GetHostnameSuggestion", input, options)
end
@doc """
Grants RDP access to a Windows instance for a specified time period.

Note: this action can be used only with Windows stacks.
"""
def grant_access(client, input, options \\ []) do
request(client, "GrantAccess", input, options)
end
@doc """
Returns a list of tags that are applied to the specified stack or layer.
"""
def list_tags(client, input, options \\ []) do
request(client, "ListTags", input, options)
end
@doc """
Reboots a specified instance. For more information, see [Starting,
Stopping, and Rebooting
Instances](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def reboot_instance(client, input, options \\ []) do
request(client, "RebootInstance", input, options)
end
@doc """
Registers a specified Amazon ECS cluster with a stack. You can register
only one cluster with a stack. A cluster can be registered with only one
stack. For more information, see [ Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [ Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_ecs_cluster(client, input, options \\ []) do
request(client, "RegisterEcsCluster", input, options)
end
@doc """
Registers an Elastic IP address with a specified stack. An address can be
registered with only one stack at a time. If the address is already
registered, you must first deregister it by calling `DeregisterElasticIp`.
For more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_elastic_ip(client, input, options \\ []) do
request(client, "RegisterElasticIp", input, options)
end
@doc """
Registers instances that were created outside of AWS OpsWorks Stacks with a
specified stack.
Note: we do not recommend using this action to register instances. The
complete registration operation includes two tasks: installing the AWS
OpsWorks Stacks agent on the instance, and registering the instance with
the stack. `RegisterInstance` handles only the second step. You should
instead use the AWS CLI `register` command, which performs the entire
registration operation. For more information, see [ Registering an Instance
with an AWS OpsWorks Stacks
Stack](http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html).

Registered instances have the same requirements as instances that
are created by using the `CreateInstance` API. For example, registered
instances must be running a supported Linux-based operating system, and
they must have a supported instance type. For more information about
requirements for instances that you want to register, see [ Preparing the
Instance](http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register-registering-preparer.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_instance(client, input, options \\ []) do
request(client, "RegisterInstance", input, options)
end
@doc """
Registers an Amazon RDS instance with a stack.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_rds_db_instance(client, input, options \\ []) do
request(client, "RegisterRdsDbInstance", input, options)
end
@doc """
Registers an Amazon EBS volume with a specified stack. A volume can be
registered with only one stack at a time. If the volume is already
registered, you must first deregister it by calling `DeregisterVolume`. For
more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def register_volume(client, input, options \\ []) do
request(client, "RegisterVolume", input, options)
end
@doc """
Specifies the load-based auto scaling configuration for a specified layer.
For more information, see [Managing Load with Time-based and Load-based
Instances](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html).
Note: to use load-based auto scaling, you must create a set of load-based
auto scaling instances. Load-based auto scaling operates only on the
instances from that set, so you must ensure that you have created enough
instances to handle the maximum anticipated load.

**Required Permissions**: To use this action, an IAM user must have
a Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def set_load_based_auto_scaling(client, input, options \\ []) do
request(client, "SetLoadBasedAutoScaling", input, options)
end
@doc """
Specifies a user's permissions. For more information, see [Security and
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def set_permission(client, input, options \\ []) do
request(client, "SetPermission", input, options)
end
@doc """
Specifies the time-based auto scaling configuration for a specified instance.
For more information, see [Managing Load with Time-based and Load-based
Instances](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def set_time_based_auto_scaling(client, input, options \\ []) do
request(client, "SetTimeBasedAutoScaling", input, options)
end
@doc """
Starts a specified instance. For more information, see [Starting, Stopping,
and Rebooting
Instances](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def start_instance(client, input, options \\ []) do
request(client, "StartInstance", input, options)
end
@doc """
Starts a stack's instances.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def start_stack(client, input, options \\ []) do
request(client, "StartStack", input, options)
end
@doc """
Stops a specified instance. When you stop a standard instance, the data
disappears and must be reinstalled when you restart the instance. You can
stop an Amazon EBS-backed instance without losing data. For more
information, see [Starting, Stopping, and Rebooting
Instances](http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def stop_instance(client, input, options \\ []) do
request(client, "StopInstance", input, options)
end
@doc """
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def stop_stack(client, input, options \\ []) do
request(client, "StopStack", input, options)
end
@doc """
Applies cost-allocation tags to a specified stack or layer in AWS OpsWorks
Stacks. For more information about how tagging works, see
[Tags](http://docs.aws.amazon.com/opsworks/latest/userguide/tagging.html)
in the AWS OpsWorks User Guide.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Unassigns a registered instance from all of its layers. The instance
remains in the stack as an unassigned instance and can be assigned to
another layer, as needed. You cannot use this action with instances that
were created with AWS OpsWorks Stacks.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def unassign_instance(client, input, options \\ []) do
request(client, "UnassignInstance", input, options)
end
@doc """
Unassigns an assigned Amazon EBS volume. The volume remains registered with
the stack. For more information, see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def unassign_volume(client, input, options \\ []) do
request(client, "UnassignVolume", input, options)
end
@doc """
Removes tags from a specified stack or layer.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must have a
Deploy or Manage permissions level for the stack, or an attached policy
that explicitly grants permissions. For more information on user
permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_app(client, input, options \\ []) do
request(client, "UpdateApp", input, options)
end
@doc """
Updates a registered Elastic IP address's name. For more information, see
[Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_elastic_ip(client, input, options \\ []) do
request(client, "UpdateElasticIp", input, options)
end
@doc """
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_instance(client, input, options \\ []) do
request(client, "UpdateInstance", input, options)
end
@doc """
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_layer(client, input, options \\ []) do
request(client, "UpdateLayer", input, options)
end
@doc """
Updates a user's SSH public key.
**Required Permissions**: To use this action, an IAM user must have
self-management enabled or an attached policy that explicitly grants
permissions. For more information on user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_my_user_profile(client, input, options \\ []) do
request(client, "UpdateMyUserProfile", input, options)
end
@doc """
Updates an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_rds_db_instance(client, input, options \\ []) do
request(client, "UpdateRdsDbInstance", input, options)
end
@doc """
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_stack(client, input, options \\ []) do
request(client, "UpdateStack", input, options)
end
@doc """
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must have an
attached policy that explicitly grants permissions. For more information on
user permissions, see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_user_profile(client, input, options \\ []) do
request(client, "UpdateUserProfile", input, options)
end
@doc """
Updates an Amazon EBS volume's name or mount point. For more information,
see [Resource
Management](http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html).
**Required Permissions**: To use this action, an IAM user must have a
Manage permissions level for the stack, or an attached policy that
explicitly grants permissions. For more information on user permissions,
see [Managing User
Permissions](http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html).
"""
def update_volume(client, input, options \\ []) do
request(client, "UpdateVolume", input, options)
end
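# Usage sketch — the client map shape is inferred from `request/4` and the
# helpers below; any field not read there (e.g. the credentials consumed by
# `AWS.Request.sign_v4/5`) is an assumption:
#
#     client = %{region: "us-east-1", endpoint: "amazonaws.com",
#                proto: "https", port: 443}
#     {:ok, _result, _response} =
#       update_stack(client, %{"StackId" => stack_id, "Name" => "renamed"})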
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "opsworks"}
host = get_host("opsworks", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "OpsWorks_20130218.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
defmodule Cluster.Strategy.Kubernetes do
@moduledoc """
This clustering strategy works by loading all endpoints in the current Kubernetes
namespace with the configured label. It will fetch the addresses of all endpoints with
that label and attempt to connect. It will continually monitor and update its
connections every 5s. Alternatively the IP can be looked up from the pods directly
by setting `kubernetes_ip_lookup_mode` to `:pods`.
In order for your endpoints to be found they should be returned when you run:
kubectl get endpoints -l app=myapp
In order for your pods to be found they should be returned when you run:
kubectl get pods -l app=myapp
It assumes that all nodes share a base name, are using longnames, and are unique
based on their FQDN, rather than the base hostname. In other words, in the following
longname, `<basename>@<domain>`, `basename` would be the value configured in
`kubernetes_node_basename`.
`domain` would be the value configured in `mode` and can be either of type `:ip`
(the pod's IP, which can be obtained by setting an env variable to `status.podIP`), `:hostname`
or `:dns`, which is the pod's internal A Record. This A Record has the format
`<ip-with-dashes>.<namespace>.pod.cluster.local`, e.g.
`1-2-3-4.default.pod.cluster.local`.
Getting `:dns` to work requires setting the `POD_A_RECORD` environment variable before
the application starts. If you use Distillery you can set it in your `pre_configure` hook:
# deployment.yaml
command: ["sh", "-c"]
args: ["POD_A_RECORD"]
args: ["export POD_A_RECORD=$(echo $POD_IP | sed 's/\./-/g') && /app/bin/app foreground"]
# vm.args
-name app@<%= "${POD_A_RECORD}.${NAMESPACE}.pod.cluster.local" %>
To set the `NAMESPACE` and `POD_IP` environment variables you can configure your pod as follows:
# deployment.yaml
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
The benefit of using `:dns` over `:ip` is that you can establish a remote shell (as well as
run observer) by using `kubectl port-forward` in combination with some entries in `/etc/hosts`.
Using `:hostname` is useful when deploying your app to K8S as a stateful set. In this case you can
set your erlang name as the fully qualified domain name of the pod which would be something similar to
`my-app-0.my-service-name.my-namespace.svc.cluster.local`.
e.g.
# vm.args
-name app@<%=`(hostname -f)`%>
In this case you must also set `kubernetes_service_name` to the name of the K8S service that is being queried.
`mode` defaults to `:ip`.
An example configuration is below:
config :libcluster,
topologies: [
k8s_example: [
strategy: #{__MODULE__},
config: [
mode: :ip,
kubernetes_node_basename: "myapp",
kubernetes_selector: "app=myapp",
kubernetes_namespace: "my_namespace",
polling_interval: 10_000]]]
"""
use GenServer
use Cluster.Strategy
import Cluster.Logger
alias Cluster.Strategy.State
@default_polling_interval 5_000
@kubernetes_master "kubernetes.default.svc"
@service_account_path "/var/run/secrets/kubernetes.io/serviceaccount"
def start_link(args), do: GenServer.start_link(__MODULE__, args)
@impl true
def init([%State{meta: nil} = state]) do
init([%State{state | :meta => MapSet.new()}])
end
def init([%State{} = state]) do
{:ok, load(state)}
end
@impl true
def handle_info(:timeout, state) do
handle_info(:load, state)
end
def handle_info(:load, %State{} = state) do
{:noreply, load(state)}
end
def handle_info(_, state) do
{:noreply, state}
end
defp load(%State{topology: topology} = state) do
new_nodelist = MapSet.new(get_nodes(state))
removed = MapSet.difference(state.meta, new_nodelist)
new_nodelist =
case Cluster.Strategy.disconnect_nodes(
topology,
state.disconnect,
state.list_nodes,
MapSet.to_list(removed)
) do
:ok ->
new_nodelist
{:error, bad_nodes} ->
# Add back the nodes which should have been removed, but which couldn't be for some reason
Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
MapSet.put(acc, n)
end)
end
new_nodelist =
case Cluster.Strategy.connect_nodes(
topology,
state.connect,
state.list_nodes,
MapSet.to_list(new_nodelist)
) do
:ok ->
new_nodelist
{:error, bad_nodes} ->
# Remove the nodes which should have been added, but couldn't be for some reason
Enum.reduce(bad_nodes, new_nodelist, fn {n, _}, acc ->
MapSet.delete(acc, n)
end)
end
Process.send_after(self(), :load, polling_interval(state))
%State{state | meta: new_nodelist}
end
defp polling_interval(%State{config: config}) do
Keyword.get(config, :polling_interval, @default_polling_interval)
end
@spec get_token(String.t()) :: String.t()
defp get_token(service_account_path) do
path = Path.join(service_account_path, "token")
case File.exists?(path) do
true -> path |> File.read!() |> String.trim()
false -> ""
end
end
@spec get_namespace(String.t(), String.t()) :: String.t()
if Mix.env() == :test do
defp get_namespace(_service_account_path, nil), do: "__libcluster_test"
else
defp get_namespace(service_account_path, nil) do
path = Path.join(service_account_path, "namespace")
if File.exists?(path) do
path |> File.read!() |> String.trim()
else
""
end
end
end
defp get_namespace(_, namespace), do: namespace
@spec get_nodes(State.t()) :: [atom()]
defp get_nodes(%State{topology: topology, config: config, meta: meta}) do
service_account_path =
Keyword.get(config, :kubernetes_service_account_path, @service_account_path)
token = get_token(service_account_path)
namespace = get_namespace(service_account_path, Keyword.get(config, :kubernetes_namespace))
app_name = Keyword.fetch!(config, :kubernetes_node_basename)
cluster_name = Keyword.get(config, :kubernetes_cluster_name, "cluster")
service_name = Keyword.get(config, :kubernetes_service_name)
selector = Keyword.fetch!(config, :kubernetes_selector)
ip_lookup_mode = Keyword.get(config, :kubernetes_ip_lookup_mode, :endpoints)
master_name = Keyword.get(config, :kubernetes_master, @kubernetes_master)
cluster_domain = System.get_env("CLUSTER_DOMAIN", "#{cluster_name}.local")
master =
cond do
String.ends_with?(master_name, cluster_domain) ->
master_name
String.ends_with?(master_name, ".") ->
# The dot at the end is used to determine that the name is "final"
master_name
:else ->
master_name <> "." <> cluster_domain
end
cond do
app_name != nil and selector != nil ->
selector = URI.encode(selector)
path =
case ip_lookup_mode do
:endpoints -> "api/v1/namespaces/#{namespace}/endpoints?labelSelector=#{selector}"
:pods -> "api/v1/namespaces/#{namespace}/pods?labelSelector=#{selector}"
end
headers = [{'authorization', 'Bearer #{token}'}]
http_options =
Keyword.get(config, :kubernetes_httpc_http_options, ssl: [verify: :none], timeout: 15000)
options = Keyword.get(config, :kubernetes_httpc_options, [])
uri_port = Keyword.get(config, :kubernetes_port, 443)
uri_scheme = Keyword.get(config, :kubernetes_scheme, 'https')
case :httpc.request(
:get,
{'#{uri_scheme}://#{master}:#{uri_port}/#{path}', headers},
http_options,
options
) do
{:ok, {{_version, 200, _status}, _headers, body}} ->
parse_response(ip_lookup_mode, Jason.decode!(body))
|> Enum.map(fn node_info ->
format_node(
Keyword.get(config, :mode, :ip),
node_info,
app_name,
cluster_name,
service_name
)
end)
{:ok, {{_version, 403, _status}, _headers, body}} ->
%{"message" => msg} = Jason.decode!(body)
warn(topology, "cannot query kubernetes (unauthorized): #{msg}")
[]
{:ok, {{_version, code, status}, _headers, body}} ->
warn(topology, "cannot query kubernetes (#{code} #{status}): #{inspect(body)}")
meta
{:error, reason} ->
error(topology, "request to kubernetes failed!: #{inspect(reason)}")
meta
end
app_name == nil ->
warn(
topology,
"kubernetes strategy is selected, but :kubernetes_node_basename is not configured!"
)
[]
selector == nil ->
warn(
topology,
"kubernetes strategy is selected, but :kubernetes_selector is not configured!"
)
[]
:else ->
warn(topology, "kubernetes strategy is selected, but is not configured!")
[]
end
end
defp parse_response(:endpoints, resp) do
case resp do
%{"items" => items} when is_list(items) ->
Enum.reduce(items, [], fn
%{"subsets" => subsets}, acc when is_list(subsets) ->
addrs =
Enum.flat_map(subsets, fn
%{"addresses" => addresses} when is_list(addresses) ->
Enum.map(addresses, fn %{"ip" => ip, "targetRef" => %{"namespace" => namespace}} =
address ->
%{ip: ip, namespace: namespace, hostname: address["hostname"]}
end)
_ ->
[]
end)
acc ++ addrs
_, acc ->
acc
end)
_ ->
[]
end
end
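# For reference, the endpoints payload matched above has roughly this shape
# (fields the code does not read are omitted):
#
#     %{"items" => [
#       %{"subsets" => [
#         %{"addresses" => [
#           %{"ip" => "10.0.0.1", "hostname" => "pod-0",
#             "targetRef" => %{"namespace" => "default"}}]}]}]}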
defp parse_response(:pods, resp) do
case resp do
%{"items" => items} when is_list(items) ->
Enum.map(items, fn
%{
"status" => %{"podIP" => ip},
"metadata" => %{"namespace" => ns},
"spec" => pod_spec
} ->
%{ip: ip, namespace: ns, hostname: pod_spec["hostname"]}
_ ->
nil
end)
|> Enum.filter(&(&1 != nil))
_ ->
[]
end
end
defp format_node(:ip, %{ip: ip}, app_name, _cluster_name, _service_name),
do: :"#{app_name}@#{ip}"
defp format_node(
:hostname,
%{hostname: hostname, namespace: namespace},
app_name,
cluster_name,
service_name
) do
:"#{app_name}@#{hostname}.#{service_name}.#{namespace}.svc.#{cluster_name}.local"
end
defp format_node(:dns, %{ip: ip, namespace: namespace}, app_name, cluster_name, _service_name) do
ip = String.replace(ip, ".", "-")
:"#{app_name}@#{ip}.#{namespace}.pod.#{cluster_name}.local"
end
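# e.g. format_node(:dns, %{ip: "10.1.2.3", namespace: "default"}, "myapp",
# "cluster", nil) yields :"myapp@10-1-2-3.default.pod.cluster.local".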
end
import Croma.Defun
defmodule Croma.BuiltinType do
@moduledoc false
@type_infos [
{Croma.Atom , "atom" , (quote do: atom ), (quote do: is_atom (var!(x)))},
{Croma.Boolean , "boolean" , (quote do: boolean ), (quote do: is_boolean (var!(x)))},
{Croma.Float , "float" , (quote do: float ), (quote do: is_float (var!(x)))},
{Croma.Integer , "integer" , (quote do: integer ), (quote do: is_integer (var!(x)))},
{Croma.Number , "number" , (quote do: number ), (quote do: is_number (var!(x)))},
{Croma.String , "String.t" , (quote do: String.t ), (quote do: is_binary (var!(x)))},
{Croma.Binary , "binary" , (quote do: binary ), (quote do: is_binary (var!(x)))},
{Croma.BitString , "bitstring" , (quote do: bitstring ), (quote do: is_bitstring(var!(x)))},
{Croma.Function , "function" , (quote do: function ), (quote do: is_function (var!(x)))},
{Croma.Pid , "pid" , (quote do: pid ), (quote do: is_pid (var!(x)))},
{Croma.Port , "port" , (quote do: port ), (quote do: is_port (var!(x)))},
{Croma.Reference , "reference" , (quote do: reference ), (quote do: is_reference(var!(x)))},
{Croma.Tuple , "tuple" , (quote do: tuple ), (quote do: is_tuple (var!(x)))},
{Croma.List , "list" , (quote do: list ), (quote do: is_list (var!(x)))},
{Croma.Map , "map" , (quote do: map ), (quote do: is_map (var!(x)))},
{Croma.Byte , "byte" , (quote do: byte ), (quote do: var!(x) in 0..255)},
{Croma.Char , "char" , (quote do: char ), (quote do: var!(x) in 0..0x10ffff)},
{Croma.PosInteger , "pos_integer" , (quote do: pos_integer ), (quote do: is_integer(var!(x)) and var!(x) > 0)},
{Croma.NegInteger , "neg_integer" , (quote do: neg_integer ), (quote do: is_integer(var!(x)) and var!(x) < 0)},
{Croma.NonNegInteger, "non_neg_integer", (quote do: non_neg_integer), (quote do: is_integer(var!(x)) and var!(x) >= 0)},
]
def type_infos(), do: @type_infos
def all() do
Enum.map(@type_infos, fn {m, _, _, _} -> m end)
end
end
Croma.BuiltinType.type_infos |> Enum.each(fn {mod, type_name, type_expr, guard_expr} ->
defmodule mod do
@moduledoc """
Module that represents the Elixir's built-in #{type_name} type.
Intended to be used with other parts of croma to express #{type_name} variables.
"""
@type t :: unquote(type_expr)
@doc """
Simply checks the argument's type using `#{Macro.to_string(guard_expr)}`.
"""
defun valid?(value :: term) :: boolean do
x when unquote(guard_expr) -> true
_ -> false
end
end
end)
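# Example usage of the generated modules:
#
#     Croma.PosInteger.valid?(42)   #=> true
#     Croma.PosInteger.valid?(0)    #=> false
#     Croma.String.valid?("hello")  #=> true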
defmodule Croma.Any do
@moduledoc """
Module that represents any Elixir term.
"""
@type t :: any
@doc """
Always returns `true`
Intended to be used with other parts of croma to express variables with `any` type.
"""
defun valid?(_value :: term) :: boolean do
true
end
end
defmodule Recase.Enumerable do
@moduledoc """
Helper module to convert enumerable keys recursively.
"""
@doc """
Invokes `fun` for each key of the enumerable and casts the keys to atoms.
"""
@spec atomize_keys(Enumerable.t()) :: Enumerable.t()
def atomize_keys(enumerable),
do: atomize_keys(enumerable, fn x -> x end)
@spec atomize_keys(Enumerable.t(), fun) :: Enumerable.t()
def atomize_keys(enumerable, fun) when is_map(enumerable) do
enumerable
|> Enum.into(%{}, fn {key, value} ->
atom_key =
key
|> cast_string()
|> fun.()
|> cast_atom()
{atom_key, handle_value(value, fun, &atomize_keys/2)}
end)
end
def atomize_keys(enumerable, fun)
when is_list(enumerable) do
enumerable
|> Enum.map(fn value -> handle_value(value, fun, &atomize_keys/2) end)
end
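# Example, using `Recase.to_snake/1` from this library as the key transformer:
#
#     atomize_keys(%{"firstName" => %{"lastName" => "Doe"}}, &Recase.to_snake/1)
#     #=> %{first_name: %{last_name: "Doe"}}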
@spec stringify_keys(Enumerable.t()) :: Enumerable.t()
def stringify_keys(enumerable),
do: stringify_keys(enumerable, fn x -> x end)
@spec stringify_keys(Enumerable.t(), fun) :: Enumerable.t()
def stringify_keys(enumerable, fun)
when is_map(enumerable) do
enumerable
|> Enum.into(%{}, fn {key, value} ->
string_key =
key
|> cast_string()
|> fun.()
{string_key, handle_value(value, fun, &stringify_keys/2)}
end)
end
def stringify_keys(enumerable, fun)
when is_list(enumerable) do
enumerable
|> Enum.map(fn value -> handle_value(value, fun, &stringify_keys/2) end)
end
@doc """
Invokes `fun` for each key of the enumerable.
"""
@spec convert_keys(Enumerable.t()) :: Enumerable.t()
def convert_keys(enumerable),
do: convert_keys(enumerable, fn x -> x end)
@spec convert_keys(Enumerable.t(), fun) :: Enumerable.t()
def convert_keys(enumerable, fun) when is_map(enumerable) do
enumerable
|> Enum.into(%{}, fn {key, value} ->
{fun.(key), handle_value(value, fun, &convert_keys/2)}
end)
end
def convert_keys(enumerable, fun)
when is_list(enumerable) do
enumerable
|> Enum.map(fn value -> handle_value(value, fun, &convert_keys/2) end)
end
defp handle_value(%DateTime{} = value, _fun, _converter), do: value
defp handle_value(value, fun, converter)
when is_map(value) or is_list(value) do
converter.(value, fun)
end
defp handle_value(value, _, _), do: value
defp cast_string(value) when is_binary(value), do: value
defp cast_string(value) when is_atom(value), do: Atom.to_string(value)
defp cast_atom(value), do: String.to_atom(value)
end
defmodule GenRtmpServer do
@moduledoc """
A behaviour module for implementing an RTMP server.
A GenRtmpServer abstracts away RTMP connection handling and data transfer
so that modules implementing this behaviour can focus on the business logic
of the RTMP events that are received and sent.
Each client that connects is placed in its own process.
"""
require Logger
@type session_id :: String.t
@type client_ip :: String.t
@type adopter_state :: any
@type command :: :ignore | :disconnect
@type request_result :: :accepted | {:rejected, command, String.t}
@type outbound_data :: GenRtmpServer.AudioVideoData.t | GenRtmpServer.MetaData.t
@type stream_id :: non_neg_integer
@type forced_timestamp :: non_neg_integer | nil
@type adopter_arguments :: [...]
@doc "Called when a new RTMP client connects"
@callback init(session_id, client_ip, adopter_arguments) :: {:ok, adopter_state}
@doc "Called when the client is requesting a connection to the specified application name"
@callback connection_requested(Rtmp.ServerSession.Events.ConnectionRequested.t, adopter_state)
:: {request_result, adopter_state}
@doc """
Called when a client wants to publish a stream to the specified application name
and stream key combination
"""
@callback publish_requested(Rtmp.ServerSession.Events.PublishStreamRequested.t, adopter_state)
:: {request_result, adopter_state}
@doc """
Called when the client is no longer publishing to the specified application name
and stream key
"""
@callback publish_finished(Rtmp.ServerSession.Events.PublishingFinished.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when the client is wanting to play a stream from the specified application
name and stream key combination
"""
@callback play_requested(Rtmp.ServerSession.Events.PlayStreamRequested.t, adopter_state)
:: {request_result, adopter_state}
@doc """
Called when the client no longer wants to play the stream from the specified
application name and stream key combination
"""
@callback play_finished(Rtmp.ServerSession.Events.PlayStreamFinished.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when a client publishing a stream has changed the metadata information
for that stream.
"""
@callback metadata_received(Rtmp.ServerSession.Events.StreamMetaDataChanged.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when audio or video data has been received on a published stream
"""
@callback audio_video_data_received(Rtmp.ServerSession.Events.AudioVideoDataReceived.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when the number of bytes sent and received to the client changes
"""
@callback byte_io_totals_updated(Rtmp.ServerSession.Events.NewByteIOTotals.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when the client sends an acknowledgement of bytes received
"""
@callback acknowledgement_received(Rtmp.ServerSession.Events.AcknowledgementReceived.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when the server has successfully sent a ping request. Handle this if the
server implementation wants to track how long a ping request has gone
unanswered, or wants to estimate round-trip latency.
"""
@callback ping_request_sent(Rtmp.ServerSession.Events.PingRequestSent.t, adopter_state)
:: {:ok, adopter_state}
@doc """
Called when the server has received a response to a ping request. Note that unsolicited
ping responses may come through, and it's up to the behaviour implementor to decide how to
react to them.
"""
@callback ping_response_received(Rtmp.ServerSession.Events.PingResponseReceived.t, adopter_state)
:: {:ok, adopter_state}
@doc "Called when an code change is ocurring"
@callback code_change(any, adopter_state) :: {:ok, adopter_state} | {:error, String.t}
@doc """
Called when any BEAM message is received that is not handleable by the generic RTMP server,
and is thus being passed along to the module adopting this behaviour.
"""
@callback handle_message(any, adopter_state) :: {:ok, adopter_state}
@doc """
Called when the TCP socket is closed. Allows for any last-minute cleanup before
the process is killed
"""
@callback handle_disconnection(adopter_state) :: {:ok, adopter_state}
@spec start_link(module(), %GenRtmpServer.RtmpOptions{}, adopter_arguments) :: Supervisor.on_start
@doc """
Starts the generic RTMP server using the provided RTMP options
"""
def start_link(module, options = %GenRtmpServer.RtmpOptions{}, additional_args \\ []) do
{:ok, _} = Application.ensure_all_started(:ranch)
_ = Logger.info "Starting RTMP listener on port #{options.port}"
:ranch.start_listener(module,
10,
:ranch_tcp,
[port: options.port],
GenRtmpServer.Protocol,
[module, options, additional_args])
end
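# Example — `MyRtmpServer` is a hypothetical module adopting this behaviour:
#
#     {:ok, _pid} =
#       GenRtmpServer.start_link(MyRtmpServer, %GenRtmpServer.RtmpOptions{port: 1935})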
@spec send_message(pid, outbound_data, stream_id, forced_timestamp) :: :ok
@doc """
Signals a specific RTMP server process to send an RTMP message to its client
"""
def send_message(pid, outbound_data, stream_id, forced_timestamp \\ nil) do
send(pid, {:rtmp_send, outbound_data, stream_id, forced_timestamp})
end
@spec send_ping_request(pid) :: :ok
@doc """
Sends a ping request to the client
"""
def send_ping_request(pid) do
send(pid, :send_ping_request)
end
end
defmodule Xgit.Util.NB do
@moduledoc false
# Internal conversion utilities for network byte order handling.
use Bitwise
import Xgit.Util.ForceCoverage
@doc ~S"""
Parses a sequence of 4 bytes (network byte order) as a signed integer.
Reads the first four bytes from `intbuf` and returns `{value, buf}`
where value is the integer value from the first four bytes at `intbuf`
and `buf` is the remainder of the byte array after those bytes.
"""
@spec decode_int32(intbuf :: [byte]) :: {integer, [byte]}
def decode_int32(intbuf)
def decode_int32([b1, b2, b3, b4 | tail]) when b1 >= 128,
do: cover({b1 * 0x1000000 + b2 * 0x10000 + b3 * 0x100 + b4 - 0x100000000, tail})
def decode_int32([b1, b2, b3, b4 | tail]),
do: cover({b1 * 0x1000000 + b2 * 0x10000 + b3 * 0x100 + b4, tail})
@doc ~S"""
Parses a sequence of 2 bytes (network byte order) as an unsigned integer.
Reads the first two bytes from `intbuf` and returns `{value, buf}`
where `value` is the unsigned integer value from the first two bytes at `intbuf`
and `buf` is the remainder of the byte array after those bytes.
"""
@spec decode_uint16(intbuf :: [byte]) :: {integer, [byte]}
def decode_uint16(intbuf)
def decode_uint16([b1, b2 | tail]), do: cover({b1 * 0x100 + b2, tail})
@doc ~S"""
Parses a sequence of 4 bytes (network byte order) as an unsigned integer.
Reads the first four bytes from `intbuf` and returns `{value, buf}`
where value is the unsigned integer value from the first four bytes at `intbuf`
and `buf` is the remainder of the byte array after those bytes.
"""
@spec decode_uint32(intbuf :: [byte]) :: {integer, [byte]}
def decode_uint32(intbuf)
def decode_uint32([b1, b2, b3, b4 | tail]),
do: cover({b1 * 0x1000000 + b2 * 0x10000 + b3 * 0x100 + b4, tail})
@doc ~S"""
Convert a 16-bit integer to a sequence of two bytes in network byte order.
"""
@spec encode_int16(v :: integer) :: [byte]
def encode_int16(v) when is_integer(v) and v >= -32_768 and v <= 65_535,
do: cover([v >>> 8 &&& 0xFF, v &&& 0xFF])
@doc ~S"""
Convert a 32-bit integer to a sequence of four bytes in network byte order.
"""
@spec encode_int32(v :: integer) :: [byte]
def encode_int32(v) when is_integer(v) and v >= -2_147_483_647 and v <= 4_294_967_295,
do: cover([v >>> 24 &&& 0xFF, v >>> 16 &&& 0xFF, v >>> 8 &&& 0xFF, v &&& 0xFF])
@doc ~S"""
Convert a 16-bit unsigned integer to a sequence of two bytes in network byte order.
"""
@spec encode_uint16(v :: non_neg_integer) :: [byte]
def encode_uint16(v) when is_integer(v) and v >= 0 and v <= 65_535,
do: cover([v >>> 8 &&& 0xFF, v &&& 0xFF])
@doc ~S"""
Convert a 32-bit unsigned integer to a sequence of four bytes in network byte order.
"""
@spec encode_uint32(v :: non_neg_integer) :: [byte]
def encode_uint32(v) when is_integer(v) and v >= 0 and v <= 4_294_967_295,
do: cover([v >>> 24 &&& 0xFF, v >>> 16 &&& 0xFF, v >>> 8 &&& 0xFF, v &&& 0xFF])
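# Examples:
#
#     encode_uint32(1)
#     #=> [0, 0, 0, 1]
#
#     decode_uint32([0, 0, 0, 1, 255])
#     #=> {1, [255]}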
end
defmodule Serum.PostList do
@moduledoc """
Defines a struct representing a list of blog posts.
## Fields
* `tag`: Specifies by which tag the posts are filtered. Can be `nil`
* `current_page`: Number of current page
* `max_page`: Number of the last page
* `title`: Title of the list
* `posts`: A list of `Post` structs
* `url`: Absolute URL of this list page in the website
* `prev_url`: Absolute URL of the previous list page. Can be `nil` if this is
the first page
* `next_url`: Absolute URL of the next list page. Can be `nil` if this is
the last page
* `output`: Destination path
"""
alias Serum.Fragment
alias Serum.Plugin
alias Serum.Renderer
alias Serum.Result
alias Serum.Tag
@type t :: %__MODULE__{
tag: maybe_tag(),
current_page: pos_integer(),
max_page: pos_integer(),
title: binary(),
posts: [map()],
url: binary(),
prev_url: binary() | nil,
next_url: binary() | nil,
output: binary(),
extras: %{optional(binary()) => binary()}
}
@type maybe_tag :: Tag.t() | nil
defstruct [
:tag,
:current_page,
:max_page,
:title,
:posts,
:url,
:prev_url,
:next_url,
:output,
:extras
]
@spec generate(maybe_tag(), [map()], map()) :: Result.t([t()])
def generate(tag, posts, proj) do
paginate? = proj.pagination
num_posts = proj.posts_per_page
paginated_posts =
posts
|> make_chunks(paginate?, num_posts)
|> Enum.with_index(1)
max_page = length(paginated_posts)
list_dir = (tag && Path.join("tags", tag.name)) || "posts"
lists =
Enum.map(paginated_posts, fn {posts, page} ->
%__MODULE__{
tag: tag,
current_page: page,
max_page: max_page,
title: list_title(tag, proj),
posts: posts,
url: Path.join([proj.base_url, list_dir, "page-#{page}.html"]),
output: Path.join([proj.dest, list_dir, "page-#{page}.html"]),
extras: %{}
}
end)
[first | rest] = put_adjacent_urls([nil | lists], [])
first_dup = %__MODULE__{
first
| url: Path.join([proj.base_url, list_dir, "index.html"]),
output: Path.join([proj.dest, list_dir, "index.html"])
}
[first_dup, first | rest]
|> Enum.map(&Plugin.processed_list/1)
|> Result.aggregate_values(:generate_lists)
end
@spec compact(t()) :: map()
def compact(%__MODULE__{} = list) do
list
|> Map.drop(~w(__struct__ output)a)
|> Map.put(:type, :list)
end
@spec put_adjacent_urls([nil | t()], [t()]) :: [t()]
defp put_adjacent_urls(lists, acc)
defp put_adjacent_urls([_last], acc), do: Enum.reverse(acc)
defp put_adjacent_urls([prev, curr | rest], acc) do
next = List.first(rest)
updated_curr = %__MODULE__{
curr
| prev_url: prev && prev.url,
next_url: next && next.url
}
put_adjacent_urls([curr | rest], [updated_curr | acc])
end
@spec make_chunks([map()], boolean(), pos_integer()) :: [[map()]]
defp make_chunks(posts, paginate?, num_posts)
defp make_chunks([], _, _), do: [[]]
defp make_chunks(posts, false, _), do: [posts]
defp make_chunks(posts, true, num_posts) do
Enum.chunk_every(posts, num_posts)
end
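# With pagination enabled, posts are split into fixed-size pages, e.g.
# make_chunks([p1, p2, p3], true, 2) returns [[p1, p2], [p3]].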
@spec list_title(maybe_tag(), map()) :: binary()
defp list_title(tag, proj)
defp list_title(nil, proj), do: proj.list_title_all
defp list_title(%Tag{name: tag_name}, proj) do
proj.list_title_tag
|> :io_lib.format([tag_name])
|> IO.iodata_to_binary()
end
@spec to_fragment(t(), map()) :: Result.t(Fragment.t())
def to_fragment(post_list, templates) do
metadata = compact(post_list)
template = templates["list"]
bindings = [page: metadata]
case Renderer.render_fragment(template, bindings) do
{:ok, html} -> Fragment.new(nil, post_list.output, metadata, html)
{:error, _} = error -> error
end
end
defimpl Fragment.Source do
alias Serum.PostList
alias Serum.Result
@spec to_fragment(PostList.t(), map()) :: Result.t(Fragment.t())
def to_fragment(fragment, templates) do
PostList.to_fragment(fragment, templates)
end
end
end
defmodule Pow.Store.Backend.MnesiaCache.Unsplit do
@moduledoc """
GenServer that handles network split recovery for
`Pow.Store.Backend.MnesiaCache`.
This should be run on any node that has the `Pow.Store.Backend.MnesiaCache`
GenServer running. It'll subscribe to the Mnesia system messages and listen
for `:inconsistent_database` system events. The first node to take the global
lock will find the island with the oldest node and restore that node's table
into all the partitioned nodes.
If a table unrelated to Pow is also affected, an error will be logged and the
network will stay partitioned. If you don't mind potential data loss for any
of your tables in Mnesia, you can set `flush_tables: :all` to restore all the
affected tables from the oldest node in the cluster.
For better control, you can use
[`unsplit`](https://github.com/uwiger/unsplit) instead of this module.
## Usage
To start the GenServer, add it to your application `start/2` method:
defmodule MyApp.Application do
use Application
def start(_type, _args) do
children = [
MyApp.Repo,
MyAppWeb.Endpoint,
{Pow.Store.Backend.MnesiaCache, extra_db_nodes: Node.list()},
Pow.Store.Backend.MnesiaCache.Unsplit
]
opts = [strategy: :one_for_one, name: MyAppWeb.Supervisor]
Supervisor.start_link(children, opts)
end
# ...
end
## Strategy for multiple libraries using the Mnesia instance
It's strongly recommended to take into account any libraries that will be
using Mnesia for storage before using this module.
A common example would be a job queue, where a potential solution to prevent
data loss is to simply keep the job queue table on only one server instead of
replicating it among all nodes. When a network partition occurs, it won't be
part of the affected tables so this module can self-heal without the job
queue table set in `:flush_tables`.
## Initialization options
* `:flush_tables` - list of tables that may be flushed and restored from
the oldest node in the cluster. Defaults to `false` when only the
MnesiaCache table will be flushed. Use `:all` if you want to flush all
affected tables. Be aware that this may cause data loss.
"""
use GenServer
require Logger
alias Pow.Config
@mnesia_cache_tab Pow.Store.Backend.MnesiaCache
@spec start_link(Config.t()) :: GenServer.on_start()
def start_link(config) do
GenServer.start_link(__MODULE__, config, name: __MODULE__)
end
# Callbacks
@impl true
@spec init(Config.t()) :: {:ok, map()}
def init(config) do
:mnesia.subscribe(:system)
{:ok, %{config: config}}
end
@impl true
@spec handle_info({:mnesia_system_event, {:inconsistent_database, any(), any()}} | any(), map()) :: {:noreply, map()}
def handle_info({:mnesia_system_event, {:inconsistent_database, _context, node}}, %{config: config} = state) do
:global.trans({__MODULE__, self()}, fn -> autoheal(node, config) end)
{:noreply, state}
end
@impl true
def handle_info(_event, state) do
{:noreply, state}
end
@doc false
def __heal__(node, config), do: autoheal(node, config)
defp autoheal(node, config) do
:running_db_nodes
|> :mnesia.system_info()
|> Enum.member?(node)
|> case do
true ->
Logger.info("[#{inspect __MODULE__}] #{inspect node} has already healed and joined #{inspect node()}")
:ok
false ->
Logger.warn("[#{inspect __MODULE__}] Detected netsplit on #{inspect node}")
heal(node, config)
end
end
defp heal(node, config) do
node
|> affected_tables()
|> force_reload(node, config)
end
defp affected_tables(node) do
:tables
|> :mnesia.system_info()
|> List.delete(:schema)
|> List.foldl([], fn table, acc ->
nodes = get_all_nodes_for_table(table)
is_shared = node in nodes && node() in nodes
case is_shared do
true -> [table | acc]
false -> acc
end
end)
end
defp get_all_nodes_for_table(table) do
[:ram_copies, :disc_copies, :disc_only_copies]
|> Enum.map(&:mnesia.table_info(table, &1))
|> Enum.concat()
end
defp force_reload(tables, node, config) do
flushable_tables =
case Config.get(config, :flush_tables, false) do
false -> [@mnesia_cache_tab]
:all -> tables
tables -> Enum.uniq([@mnesia_cache_tab | tables])
end
maybe_force_reload(tables, flushable_tables, node)
end
defp maybe_force_reload(tables, flushable_tables, node) do
case tables -- flushable_tables do
[] ->
do_force_reload(tables, node)
unflushable_tables ->
Logger.error("[#{inspect __MODULE__}] Can't force reload unexpected tables #{inspect unflushable_tables}")
{:error, {:unexpected_tables, tables}}
end
end
defp do_force_reload(tables, node) do
[master_nodes, nodes] = sorted_cluster_islands(node)
for node <- nodes do
:stopped = :rpc.call(node, :mnesia, :stop, [])
for table <- tables, do: :ok = :rpc.call(node, :mnesia, :set_master_nodes, [table, master_nodes])
:ok = :rpc.block_call(node, :mnesia, :start, [])
:ok = :rpc.call(node, :mnesia, :wait_for_tables, [tables, :timer.seconds(15)])
Logger.info("[#{inspect __MODULE__}] #{inspect node} has been healed and joined #{inspect master_nodes}")
end
:ok
end
defp sorted_cluster_islands(node) do
island_a = :mnesia.system_info(:running_db_nodes)
island_b = :rpc.call(node, :mnesia, :system_info, [:running_db_nodes])
Enum.sort([island_a, island_b], &older?/2)
end
defp older?(island_a, island_b) do
all_nodes = get_all_nodes_for_table(@mnesia_cache_tab)
island_nodes = Enum.concat(island_a, island_b)
oldest_node = all_nodes |> Enum.reverse() |> Enum.find(&(&1 in island_nodes))
oldest_node in island_a
end
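# The heuristic above assumes `:mnesia.table_info/2` lists replica nodes in
# the order they were added: reversing that list and taking the first node
# present in either island picks the island holding the longest-lived
# replica, which then becomes the master side.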
end
defmodule Geometry.PolygonZM do
@moduledoc """
A polygon struct, representing a 3D polygon with a measurement.
A non-empty polygon requires at least one ring with four points.
"""
alias Geometry.{GeoJson, LineStringZM, PolygonZM, WKB, WKT}
defstruct rings: []
@type t :: %PolygonZM{rings: [Geometry.coordinates()]}
@doc """
Creates an empty `PolygonZM`.
## Examples
iex> PolygonZM.new()
%PolygonZM{rings: []}
"""
@spec new :: t()
def new, do: %PolygonZM{}
@doc """
Creates a `PolygonZM` from the given `rings`.
## Examples
iex> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(35, 10, 13, 14),
...> PointZM.new(45, 45, 23, 24),
...> PointZM.new(10, 20, 33, 34),
...> PointZM.new(35, 10, 13, 14)
...> ]),
...> LineStringZM.new([
...> PointZM.new(20, 30, 13, 14),
...> PointZM.new(35, 35, 23, 24),
...> PointZM.new(30, 20, 33, 34),
...> PointZM.new(20, 30, 13, 14)
...> ])
...> ])
%PolygonZM{
rings: [
[[35, 10, 13, 14], [45, 45, 23, 24], [10, 20, 33, 34], [35, 10, 13, 14]],
[[20, 30, 13, 14], [35, 35, 23, 24], [30, 20, 33, 34], [20, 30, 13, 14]]
]
}
iex> PolygonZM.new()
%PolygonZM{}
"""
@spec new([LineStringZM.t()]) :: t()
def new(rings) when is_list(rings) do
%PolygonZM{rings: Enum.map(rings, fn line_string -> line_string.points end)}
end
@doc """
Returns `true` if the given `PolygonZM` is empty.
## Examples
iex> PolygonZM.empty?(PolygonZM.new())
true
iex> PolygonZM.empty?(
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(35, 10, 13, 14),
...> PointZM.new(45, 45, 23, 24),
...> PointZM.new(10, 20, 33, 34),
...> PointZM.new(35, 10, 13, 14)
...> ])
...> ])
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%PolygonZM{rings: rings}), do: Enum.empty?(rings)
@doc """
Creates a `PolygonZM` from the given coordinates.
## Examples
iex> PolygonZM.from_coordinates([
...> [[1, 1, 1, 1], [2, 1, 2, 3], [2, 2, 3, 2], [1, 1, 1, 1]]
...> ])
%PolygonZM{
rings: [
[[1, 1, 1, 1], [2, 1, 2, 3], [2, 2, 3, 2], [1, 1, 1, 1]]
]
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(rings) when is_list(rings), do: %PolygonZM{rings: rings}
@doc """
Returns an `:ok` tuple with the `PolygonZM` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "Polygon",
...> "coordinates": [
...> [[35, 10, 11, 12],
...> [45, 45, 21, 22],
...> [15, 40, 31, 33],
...> [10, 20, 11, 55],
...> [35, 10, 11, 12]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> PolygonZM.from_geo_json()
{:ok, %PolygonZM{
rings: [
[
[35, 10, 11, 12],
[45, 45, 21, 22],
[15, 40, 31, 33],
[10, 20, 11, 55],
[35, 10, 11, 12]
]
]
}}
iex> ~s(
...> {
...> "type": "Polygon",
...> "coordinates": [
...> [[35, 10, 11, 12],
...> [45, 45, 21, 22],
...> [15, 40, 31, 33],
...> [10, 20, 11, 55],
...> [35, 10, 11, 12]],
...> [[20, 30, 11, 11],
...> [35, 35, 14, 55],
...> [30, 20, 12, 45],
...> [20, 30, 11, 11]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> PolygonZM.from_geo_json()
{:ok, %PolygonZM{
rings: [[
[35, 10, 11, 12],
[45, 45, 21, 22],
[15, 40, 31, 33],
[10, 20, 11, 55],
[35, 10, 11, 12]
], [
[20, 30, 11, 11],
[35, 35, 14, 55],
[30, 20, 12, 45],
[20, 30, 11, 11]
]]
}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_polygon(json, PolygonZM)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_polygon(json, PolygonZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `PolygonZM`.
## Examples
iex> PolygonZM.to_geo_json(
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(35, 10, 13, 14),
...> PointZM.new(45, 45, 23, 24),
...> PointZM.new(10, 20, 33, 34),
...> PointZM.new(35, 10, 13, 14)
...> ]),
...> LineStringZM.new([
...> PointZM.new(20, 30, 13, 14),
...> PointZM.new(35, 35, 23, 24),
...> PointZM.new(30, 20, 33, 34),
...> PointZM.new(20, 30, 13, 14)
...> ])
...> ])
...> )
%{
"type" => "Polygon",
"coordinates" => [
[
[35, 10, 13, 14],
[45, 45, 23, 24],
[10, 20, 33, 34],
[35, 10, 13, 14]
], [
[20, 30, 13, 14],
[35, 35, 23, 24],
[30, 20, 33, 34],
[20, 30, 13, 14]
]
]
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%PolygonZM{rings: rings}) do
%{
"type" => "Polygon",
"coordinates" => rings
}
end
@doc """
Returns an `:ok` tuple with the `PolygonZM` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> PolygonZM.from_wkt("
...> POLYGON ZM (
...> (35 10 11 22, 45 45 22 33, 15 40 33 44, 10 20 55 66, 35 10 11 22),
...> (20 30 22 55, 35 35 33 66, 30 20 88 99, 20 30 22 55)
...> )
...> ")
{:ok,
%PolygonZM{
rings: [
[
[35, 10, 11, 22],
[45, 45, 22, 33],
[15, 40, 33, 44],
[10, 20, 55, 66],
[35, 10, 11, 22]
], [
[20, 30, 22, 55],
[35, 35, 33, 66],
[30, 20, 88, 99],
[20, 30, 22, 55]
]
]
}}
iex> "
...> SRID=789;
...> POLYGON ZM (
...> (35 10 11 22, 45 45 22 33, 15 40 33 44, 10 20 55 66, 35 10 11 22),
...> (20 30 22 55, 35 35 33 66, 30 20 88 99, 20 30 22 55)
...> )
...> "
iex> |> PolygonZM.from_wkt()
{:ok, {
%PolygonZM{
rings: [
[
[35, 10, 11, 22],
[45, 45, 22, 33],
[15, 40, 33, 44],
[10, 20, 55, 66],
[35, 10, 11, 22]
], [
[20, 30, 22, 55],
[35, 35, 33, 66],
[30, 20, 88, 99],
[20, 30, 22, 55]
]
]
},
789
}}
iex> PolygonZM.from_wkt("Polygon ZM EMPTY")
{:ok, %PolygonZM{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, PolygonZM)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, PolygonZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `PolygonZM`. With option `:srid` an
EWKT representation with the SRID is returned.
## Examples
iex> PolygonZM.to_wkt(PolygonZM.new())
"Polygon ZM EMPTY"
iex> PolygonZM.to_wkt(PolygonZM.new(), srid: 1123)
"SRID=1123;Polygon ZM EMPTY"
iex> PolygonZM.to_wkt(
...> PolygonZM.new([
...> LineStringZM.new([
...> PointZM.new(35, 10, 13, 14),
...> PointZM.new(45, 45, 23, 24),
...> PointZM.new(10, 20, 33, 34),
...> PointZM.new(35, 10, 13, 14)
...> ]),
...> LineStringZM.new([
...> PointZM.new(20, 30, 13, 14),
...> PointZM.new(35, 35, 23, 24),
...> PointZM.new(30, 20, 33, 34),
...> PointZM.new(20, 30, 13, 14)
...> ])
...> ])
...> )
"Polygon ZM ((35 10 13 14, 45 45 23 24, 10 20 33 34, 35 10 13 14), (20 30 13 14, 35 35 23 24, 30 20 33 34, 20 30 13 14))"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%PolygonZM{rings: rings}, opts \\ []) do
WKT.to_ewkt(<<"Polygon ZM ", to_wkt_rings(rings)::binary()>>, opts)
end
@doc """
Returns the WKB representation for a `PolygonZM`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZM.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%PolygonZM{rings: rings}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(rings, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `PolygonZM` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
The optional second argument determines if a `:hex`-string or a `:binary`
input is expected. The default is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointZM.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, PolygonZM)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, PolygonZM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc false
@compile {:inline, to_wkt_rings: 1}
@spec to_wkt_rings(list()) :: String.t()
def to_wkt_rings([]), do: "EMPTY"
def to_wkt_rings([ring | rings]) do
<<
"(",
LineStringZM.to_wkt_points(ring)::binary(),
Enum.reduce(rings, "", fn ring, acc ->
<<acc::binary(), ", ", LineStringZM.to_wkt_points(ring)::binary()>>
end)::binary(),
")"
>>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(coordinates, srid, endian, mode) :: wkb
when coordinates: [Geometry.coordinates()],
srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(rings, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_rings(rings, endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_rings: 3}
defp to_wkb_rings(rings, endian, mode) do
Enum.reduce(rings, WKB.length(rings, endian, mode), fn ring, acc ->
<<acc::binary(), LineStringZM.to_wkb_points(ring, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "C0000003"
{:ndr, false} -> "030000C0"
{:xdr, true} -> "E0000003"
{:ndr, true} -> "030000E0"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0xC0000003::big-integer-size(32)>>
{:ndr, false} -> <<0xC0000003::little-integer-size(32)>>
{:xdr, true} -> <<0xE0000003::big-integer-size(32)>>
{:ndr, true} -> <<0xE0000003::little-integer-size(32)>>
end
end
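# The constants above are EWKB type codes: 3 selects Polygon, 0x80000000 and
# 0x40000000 flag the Z and M dimensions (together 0xC0000003), and
# 0x20000000 additionally flags an embedded SRID (giving 0xE0000003).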
end
defimpl JSON.Encoder, for: Tuple do
@doc """
Encodes an Elixir tuple into a JSON array
"""
def encode(term), do: term |> Tuple.to_list() |> JSON.Encoder.Helpers.enum_encode()
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof(_), do: :array
end
defimpl JSON.Encoder, for: HashDict do
@doc """
Encodes an Elixir HashDict into a JSON object
"""
def encode(dict), do: JSON.Encoder.Helpers.dict_encode(dict)
@doc """
Returns :object
"""
def typeof(_), do: :object
end
defimpl JSON.Encoder, for: List do
@doc """
Encodes an Elixir List into a JSON array
"""
def encode([]), do: {:ok, "[]"}
def encode(list) do
if Keyword.keyword?(list) do
JSON.Encoder.Helpers.dict_encode(list)
else
JSON.Encoder.Helpers.enum_encode(list)
end
end
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof([]), do: :array
def typeof(list) do
if Keyword.keyword?(list) do
:object
else
:array
end
end
end
defimpl JSON.Encoder, for: [Integer, Float] do
@doc """
Converts Elixir Integer and Floats into JSON Numbers
"""
# Elixir converts octal, etc into decimal when putting in strings
def encode(number), do: {:ok, "#{number}"}
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof(_), do: :number
end
defimpl JSON.Encoder, for: Atom do
@doc """
Converts Elixir Atoms into their JSON equivalents
"""
def encode(nil), do: {:ok, "null"}
def encode(false), do: {:ok, "false"}
def encode(true), do: {:ok, "true"}
def encode(atom) when is_atom(atom), do: atom |> Atom.to_string() |> JSON.Encoder.encode()
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof(boolean) when is_boolean(boolean), do: :boolean
def typeof(nil), do: :null
def typeof(atom) when is_atom(atom), do: :string
end
defimpl JSON.Encoder, for: BitString do
# 32 = ASCII space; the numeric value is clearer here than a char literal
@ascii_space 32
@doc """
Converts Elixir String into JSON String
"""
def encode(bitstring), do: {:ok, <<?">> <> encode_binary_recursive(bitstring, []) <> <<?">>}
defp encode_binary_recursive(<<head::utf8, tail::binary>>, acc) do
encode_binary_recursive(tail, encode_binary_character(head, acc))
end
# stop cond
defp encode_binary_recursive(<<>>, acc), do: acc |> Enum.reverse() |> to_string
defp encode_binary_character(?", acc), do: [?", ?\\ | acc]
defp encode_binary_character(?\b, acc), do: [?b, ?\\ | acc]
defp encode_binary_character(?\f, acc), do: [?f, ?\\ | acc]
defp encode_binary_character(?\n, acc), do: [?n, ?\\ | acc]
defp encode_binary_character(?\r, acc), do: [?r, ?\\ | acc]
defp encode_binary_character(?\t, acc), do: [?t, ?\\ | acc]
defp encode_binary_character(?\\, acc), do: [?\\, ?\\ | acc]
defp encode_binary_character(char, acc) when is_number(char) and char < @ascii_space do
encode_hexadecimal_unicode_control_character(char, [?u, ?\\ | acc])
end
# anything else besides these control characters, just let it through
defp encode_binary_character(char, acc) when is_number(char), do: [char | acc]
defp encode_hexadecimal_unicode_control_character(char, acc) when is_number(char) do
[
char
|> Integer.to_charlist(16)
|> zeropad_hexadecimal_unicode_control_character
|> Enum.reverse()
| acc
]
end
defp zeropad_hexadecimal_unicode_control_character([a, b, c]), do: [?0, a, b, c]
defp zeropad_hexadecimal_unicode_control_character([a, b]), do: [?0, ?0, a, b]
defp zeropad_hexadecimal_unicode_control_character([a]), do: [?0, ?0, ?0, a]
defp zeropad_hexadecimal_unicode_control_character(iolist) when is_list(iolist), do: iolist
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof(_), do: :string
end
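# Example of the escaping performed above:
#
#     JSON.Encoder.encode("a\tb")
#     #=> {:ok, "\"a\\tb\""}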
defimpl JSON.Encoder, for: Record do
@doc """
Encodes Elixir records into JSON objects
"""
def encode(record), do: record.to_keywords |> JSON.Encoder.Helpers.dict_encode()
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof(_), do: :object
end
defimpl JSON.Encoder, for: Map do
@doc """
Encodes maps into object
"""
def encode(map), do: map |> JSON.Encoder.Helpers.dict_encode()
@doc """
Returns an atom that represents the JSON type for the term
"""
def typeof(_), do: :object
end
defimpl JSON.Encoder, for: Any do
@moduledoc """
Fallback module for encoding any other values
"""
@doc """
Encodes a map into a JSON object
"""
def encode(%{} = struct) do
struct
|> Map.to_list()
|> JSON.Encoder.Helpers.dict_encode()
end
@doc """
Fallback method
"""
def encode(x) do
x
|> Kernel.inspect()
|> JSON.Encoder.encode()
end
@doc """
Fallback method
"""
def typeof(struct) when is_map(struct), do: :object
def typeof(_), do: :string
end
defmodule Securion do
@moduledoc """
Elixir client library to the [SecurionPay](https://securionpay.com) payment
gateway REST APIs.
> Please refer to the [docs](https://securionpay.com/docs/api) of the original REST
APIs when in doubt. This client library is a thin wrapper around it and
most details are left unchanged.
However, some API shortcuts (e.g. on-the-go card creation for new
subscriptions, see the
[https://securionpay.com/docs/api#subscription-create](docs))
were removed in favor of simpler, composable APIs.
`securion` doesn't fully cover the original APIs yet, possibly ever. Only
core features are available. Most notably, subscriptions, plans, and events
are *not* supported.
## Installation
Add `securion` as dependency in `mix.exs`:
defp deps do
[
{:securion, "~> x.y.z"}
]
end
## Configuration
You must provide your SecurionPay API secret key. The public key is **not** required.
# config/config.exs
config :securion,
secret_key: "sk_ ..."
You should also run your tests with test-only API keys, like so:
# config/test.exs
config :securion,
secret_key: "sk_test_ ..."
## Overview
- `Securion.Customer` - Customers APIs.
- `Securion.Card` - Payment cards APIs.
- `Securion.Token` - One-time only tokens for safely handling payment
cards.
- `Securion.Charge` - Transactions APIs.
"""
import Securion.Resource
@default_limit 10
@doc """
Fetches a page of SecurionPay resources (`list`). `limit`
determines the maximum number of resources.
"""
def fetch_list(list, limit \\ @default_limit) do
get(list.path, list.params ++ [limit: limit])
end
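# Usage sketch — the list argument's shape (a struct or map with `:path` and
# `:params`) is inferred from how it is read above; the "/customers" path is
# illustrative:
#
#     {:ok, page} = Securion.fetch_list(%{path: "/customers", params: []}, 20)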
@doc """
Fetches a page of SecurionPay resources (`list`). `limit`
determines the maximum number of resources.
**Only resources that follow `cursor` will be selected.**
"""
def fetch_list_after(list, cursor, limit \\ @default_limit) do
get(list.path, list.params ++ [limit: limit, startingAfterId: cursor])
end
@doc """
Fetches a page of SecurionPay resources (`list`). `limit`
determines the maximum number of resources.
**Only resources that precede `cursor` will be selected.**
"""
def fetch_list_before(list, cursor, limit \\ @default_limit) do
get(list.path, list.params ++ [limit: limit, startingBeforeId: cursor])
end
end
defmodule Typo.PDF.Annotation do
@moduledoc """
Functions to generate PDF annotations.
"""
import Typo.Utils.Guards
alias Typo.PDF
alias Typo.PDF.{IdMap, PageUse}
alias Typo.Utils.Colour
# decodes border style options, returning a border dictionary.
@spec border_style(Keyword.t(), atom(), atom()) :: map()
defp border_style(options, style_atom, width_atom) do
width = Keyword.get(options, width_atom, 1)
is_number(width) || raise(ArgumentError, "invalid border width: #{inspect(width)}")
style =
case Keyword.get(options, style_atom, :none) do
:none -> %{}
:beveled -> %{"S" => "B"}
:inset -> %{"S" => "I"}
:solid -> %{"S" => "S"}
:underline -> %{"S" => "U"}
other -> raise ArgumentError, "invalid border style: #{inspect(other)}"
end
%{"W" => width} |> Map.merge(style)
end
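# For example, `border_style([style: :solid, width: 2], :style, :width)`
# returns `%{"W" => 2, "S" => "S"}`.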
# given colour tuple/name returns map with "C" key containg decoded colour as value.
@spec colour_map(Typo.colour()) :: map()
defp colour_map(colour) do
if colour == :none do
%{}
else
case Colour.decode(colour) do
g when is_number(g) -> %{"C" => [g]}
{r, g, b} -> %{"C" => [r, g, b]}
{c, m, y, k} -> %{"C" => [c, m, y, k]}
end
end
end
# converts highlight atom to string.
defp highlight(highlight) do
case highlight do
:none -> "None"
:invert -> "Invert"
:outline -> "Outline"
:push -> "Push"
end
end
@doc """
Add a link annotation that points to a previously defined destination
(added using `add_destination/3`).
`rect` is a rectangle which specifies the lower left and top right of
the active link area.
`options` is a keyword list containing any of the following options:
* `:highlight` - the link's highlighting mode, which can be one of:
* `:none` - no highlighting.
* `:invert` - inverts the contents of the link rectangle (default).
* `:outline` - inverts the border of the link rectangle.
* `:push` - display the annotation as if it were being pushed below the surface
of the page.
* `:style` - specifies annotation border style:
* `:none` - no border is drawn (default).
* `:beveled` - an embossed rectangle is drawn around the annotation.
* `:inset` - a recessed rectangle is drawn around the annotation.
* `:solid` - a solid rectangle is drawn around the annotation.
* `:underline` - a single line is drawn along the bottom of the annotation.
* `:width` - specifies the thickness of the border to draw.
* `:colour` - specifies the link annotation colour (may also be spelt `:color`).
Specify `:none` as the colour name to produce a transparent border.
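## Example
A minimal sketch (the destination id and rectangle are illustrative; `:chapter_1`
is assumed to have been registered earlier with `add_destination/3`):
    pdf
    |> Typo.PDF.Annotation.link(:chapter_1, {100, 100, 300, 120}, style: :underline, colour: "#00F")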
"""
@spec link(PDF.t(), Typo.id(), Typo.rectangle(), Keyword.t()) :: PDF.t()
def link(%PDF{} = pdf, destination, rect, options)
when is_id(destination) and is_rectangle(rect) and is_list(options) do
IdMap.has_id?(pdf.destinations, destination) ||
raise(ArgumentError, "unknown destination: #{inspect(destination)}")
dest = IdMap.get_item!(pdf.destinations, destination)
colour_name = Keyword.get(options, :colour) || Keyword.get(options, :color) || "#000"
colour = colour_map(colour_name)
highlight = highlight(Keyword.get(options, :highlight, :invert))
annot =
%{
"Type" => "Annot",
"Subtype" => "Link",
"BS" => border_style(options, :style, :width),
"Dest" => {:dest, dest},
"H" => highlight,
"Rect" => Tuple.to_list(rect)
}
|> Map.merge(colour)
register_annotation(pdf, annot)
end
# registers an annotation map.
@spec register_annotation(PDF.t(), map()) :: PDF.t()
defp register_annotation(%PDF{} = pdf, annotation) do
annotations = IdMap.add_item_nox(pdf.annotations, annotation)
annotation_id = IdMap.get_last_int_id(annotations)
page = {:page, pdf.user_page}
page_annot_use = PageUse.add_item(pdf.page_annotation_use, page, annotation_id)
%PDF{pdf | annotations: annotations, page_annotation_use: page_annot_use}
end
end
defmodule ElxValidation.Different do
@moduledoc """
### different:value
- The field under validation must have a different value than field.
### equal:value
- The field under validation must be equal to the given field. The two fields must be of the same type.
Strings and numerics are evaluated using the same conventions as the size rule.
### gt:value
- The field under validation must be greater than the given field. The two fields must be of the same type. Strings and numerics are evaluated using the same conventions as the size rule.
### gte:value
- The field under validation must be greater than or equal to the given field. The two fields must be of the same type.
Strings and numerics are evaluated using the same conventions as the size rule.
### lt:value
- The field under validation must be less than the given field. The two fields must be of the same type. Strings and numerics are evaluated using the same conventions as the size rule.
### lte:value
- The field under validation must be less than or equal to the given field. The two fields must be of the same type.
Strings and numerics are evaluated using the same conventions as the size rule.
### examples
```
data = %{
num_diff: 234 --> not be 233
str_diff: "MQZVD" --> not be ABCD
}
rules = [
%{
field: "num_diff",
validate: ["different:233"]
},
%{
field: "str_diff",
validate: ["different:ABCD"]
}
]
```
***
```
data = %{
num_eq: 100, --> must be 100
str_eq: "abcd" --> must be "abcd"
}
rules = [
%{
field: "num_eq",
validate: ["equal:100"]
},
%{
field: "str_eq",
validate: ["equal:abcd"]
},
]
```
***
```
data = %{
num_gt: 101, --> greater than 100
num_gte: 200, --> greater than or equal 200
str_gt: "abcd", --> length of this must greater than length of abc(3 char)
str_gte: "abcd" --> length of this must greater than or equal length of abc(3 char)
}
rules = [
%{
field: "num_gt",
validate: ["gt:100"]
},
%{
field: "num_gte",
validate: ["gte:200"]
},
%{
field: "str_gt",
validate: ["gt:abc"]
},
%{
field: "str_gte",
validate: ["gte:abc"]
},
]
```
***
```
data = %{
num_lt: 99, --> less than 100
num_lte: 199, --> less than or equal 200
str_lt: "ab", --> length of this must less than length of abc(3char)
str_lte: "abcd" --> length of this must less than length of abcde(5 char)
}
rules = [
%{
field: "num_lt",
validate: ["lt:100"]
},
%{
field: "num_lte",
validate: ["lte:200"]
},
%{
field: "str_lt",
validate: ["lt:ABC"]
},
%{
field: "str_lte",
validate: ["lte:ABCDE"]
},
]
```
"""
@doc """
`target` has to be different from `value`
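## Example (illustrative)
    iex> ElxValidation.Different.is_different(234, "233")
    true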
"""
def is_different(target, value) do
cond do
is_number(target) -> String.to_integer(value) != target
is_bitstring(target) -> value != target
true -> false
end
rescue
_ ->
false
end
@doc """
`target` has to be equal to `value`
"""
def equal(target, value) do
cond do
is_number(target) -> String.to_integer(value) == target
is_bitstring(target) -> value == target
true -> false
end
rescue
_ ->
false
end
@doc """
`target` has to be greater than `value`
"""
def gt(target, value) do
cond do
is_number(target) -> String.to_integer(value) < target
is_bitstring(target) -> String.length(value) < String.length(target)
true -> false
end
rescue
_ ->
false
end
@doc """
`target` has to be greater than or equal to `value`
"""
def gte(target, value) do
cond do
is_number(target) -> String.to_integer(value) <= target
is_bitstring(target) -> String.length(value) <= String.length(target)
true -> false
end
rescue
_ ->
false
end
@doc """
`target` has to be less than `value`
"""
def lt(target, value) do
cond do
is_number(target) -> String.to_integer(value) > target
is_bitstring(target) -> String.length(value) > String.length(target)
true -> false
end
rescue
_ ->
false
end
@doc """
`target` has to be less than or equal to `value`
"""
def lte(target, value) do
cond do
is_number(target) -> String.to_integer(value) >= target
is_bitstring(target) -> String.length(value) >= String.length(target)
true -> false
end
rescue
_ ->
false
end
end
defmodule IO.ANSI.Sequence do
@moduledoc false
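# Example (illustrative): `defsequence :reset, 0` defines a public `reset/0`
# returning "\e[0m" plus a private `escape_sequence/1` clause that matches the
# "reset" prefix when parsing `%{...}` escape specifications.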
defmacro defsequence(name, code // "", terminator // "m") do
quote bind_quoted: [name: name, code: code, terminator: terminator] do
def unquote(name)() do
"\e[#{unquote(code)}#{unquote(terminator)}"
end
defp escape_sequence(<< unquote(atom_to_binary(name)), rest :: binary >>) do
{ "\e[#{unquote(code)}#{unquote(terminator)}", rest }
end
end
end
end
defmodule IO.ANSI do
@moduledoc """
Functionality to render ANSI escape sequences
(http://en.wikipedia.org/wiki/ANSI_escape_code) — characters embedded
in text used to control formatting, color, and other output options
on video text terminals.
"""
import IO.ANSI.Sequence
@doc """
Checks whether the default I/O device is a terminal or a file.
Used to identify whether printing ANSI escape sequences will likely
be displayed as intended.
"""
@spec terminal? :: boolean
@spec terminal?(:io.device) :: boolean
def terminal?(device // :erlang.group_leader) do
match?({:ok, _}, :io.columns(device))
end
@doc "Resets all attributes"
defsequence :reset, 0
@doc "Bright (increased intensity) or Bold"
defsequence :bright, 1
@doc "Faint (decreased intensity), not widely supported"
defsequence :faint, 2
@doc "Italic: on. Not widely supported. Sometimes treated as inverse."
defsequence :italic, 3
@doc "Underline: Single"
defsequence :underline, 4
@doc "Blink: Slow. Less than 150 per minute"
defsequence :blink_slow, 5
@doc "Blink: Rapid. MS-DOS ANSI.SYS; 150 per minute or more; not widely supported"
defsequence :blink_rapid, 6
@doc "Image: Negative. Swap foreground and background"
defsequence :inverse, 7
@doc "Image: Negative. Swap foreground and background"
defsequence :reverse, 7
@doc "Conceal. Not widely supported"
defsequence :conceal, 8
@doc "Crossed-out. Characters legible, but marked for deletion. Not widely supported."
defsequence :crossed_out, 9
@doc "Sets primary (default) font"
defsequence :primary_font, 10
lc font_n inlist [1, 2, 3, 4, 5, 6, 7, 8, 9] do
@doc "Sets alternative font #{font_n}"
defsequence :"font_#{font_n}", font_n + 10
end
@doc "Normal color or intensity"
defsequence :normal, 22
@doc "Not italic"
defsequence :not_italic, 23
@doc "Underline: None"
defsequence :no_underline, 24
@doc "Blink: off"
defsequence :blink_off, 25
colors = [:black, :red, :green, :yellow, :blue, :magenta, :cyan, :white]
colors = Enum.zip(0..(length(colors)-1), colors)
lc { code, color } inlist colors do
@doc "Sets foreground color to #{color}"
defsequence color, code + 30
@doc "Sets background color to #{color}"
defsequence :"#{color}_background", code + 40
end
@doc "Default text color"
defsequence :default_color, 39
@doc "Default background color"
defsequence :default_background, 49
@doc "Framed"
defsequence :framed, 51
@doc "Encircled"
defsequence :encircled, 52
@doc "Overlined"
defsequence :overlined, 53
@doc "Not framed or encircled"
defsequence :not_framed_encircled, 54
@doc "Not overlined"
defsequence :not_overlined, 55
@doc "Send cursor home"
defsequence :home, "", "H"
@doc "Clear screen"
defsequence :clear, "2", "J"
# Catch spaces between codes
defp escape_sequence(<< ?\s, rest :: binary >>) do
escape_sequence(rest)
end
defp escape_sequence(other) do
[spec|_] = String.split(other, %r/(,|\})/)
raise ArgumentError, message: "invalid ANSI sequence specification: #{spec}"
end
@doc %S"""
Escapes a string by converting named ANSI sequences into actual ANSI codes.
The format for referring to sequences is `%{red}` and `%{red,bright}` (for
multiple sequences).
It will also append a `%{reset}` to the string. If you don't want this
behaviour, use `escape_fragment/2`.
An optional boolean parameter can be passed to enable or disable
emitting actual ANSI codes. When `false`, no ANSI codes will be emitted.
By default, standard output is checked to see whether it is a terminal capable
of handling these sequences (using the `terminal?/1` function)
## Examples
iex> IO.ANSI.escape("Hello %{red,bright,green}yes", true)
"Hello \e[31m\e[1m\e[32myes\e[0m"
"""
@spec escape(String.t, emit :: boolean) :: String.t
def escape(string, emit // terminal?) do
{rendered, emitted} = do_escape(string, false, emit, false, [])
if emitted and emit do
rendered <> reset
else
rendered
end
end
@doc %S"""
Escapes a string by converting named ANSI sequences into actual ANSI codes.
The format for referring to sequences is `%{red}` and `%{red,bright}` (for
multiple sequences).
An optional boolean parameter can be passed to enable or disable
emitting actual ANSI codes. When `false`, no ANSI codes will be emitted.
By default, standard output is checked to see whether it is a terminal capable
of handling these sequences (using the `terminal?/1` function)
## Examples
iex> IO.ANSI.escape_fragment("Hello %{red,bright,green}yes", true)
"Hello \e[31m\e[1m\e[32myes"
iex> IO.ANSI.escape_fragment("%{reset}bye", true)
"\e[0mbye"
"""
@spec escape_fragment(String.t, emit :: boolean) :: String.t
def escape_fragment(string, emit // terminal?) do
{rendered, _emitted} = do_escape(string, false, emit, false, [])
rendered
end
defp do_escape(<< ?%, ?{, rest :: binary >>, false, emit, _emitted, acc) do
do_escape_sequence(rest, emit, acc)
end
defp do_escape(<< ?,, rest :: binary >>, true, emit, _emitted, acc) do
do_escape_sequence(rest, emit, acc)
end
defp do_escape(<< ?\s, rest :: binary >>, true, emit, emitted, acc) do
do_escape(rest, true, emit, emitted, acc)
end
defp do_escape(<< ?}, rest :: binary >>, true, emit, emitted, acc) do
do_escape(rest, false, emit, emitted, acc)
end
defp do_escape(<< x :: [binary, size(1)], rest :: binary>>, false, emit, emitted, acc) do
do_escape(rest, false, emit, emitted, [x|acc])
end
defp do_escape("", false, _emit, emitted, acc) do
{iolist_to_binary(Enum.reverse(acc)), emitted}
end
defp do_escape_sequence(rest, emit, acc) do
{code, rest} = escape_sequence(rest)
if emit do
acc = [code|acc]
end
do_escape(rest, true, emit, true, acc)
end
end
defmodule Bundlex.CNode do
@moduledoc """
Utilities to ease interaction with Bundlex-based CNodes, so they can be treated
more like Elixir processes / `GenServer`s.
"""
use Bunch
alias Bundlex.Helper.MixHelper
@enforce_keys [:server, :node]
defstruct @enforce_keys
@typedoc """
Reference to the CNode.
Consists of pid of CNode's associated server and CNode name.
"""
@type t :: %__MODULE__{
server: pid,
node: node
}
@type on_start_t :: {:ok, t} | {:error, :spawn_cnode | :connect_to_cnode}
@doc """
Spawns and connects to CNode `cnode_name` from application of calling module.
See `#{inspect(__MODULE__)}.start_link/2` for more details.
"""
defmacro start_link(native_name) do
app = MixHelper.get_app!(__CALLER__.module)
quote do
unquote(__MODULE__).start_link(unquote(app), unquote(native_name))
end
end
@doc """
Spawns and connects to CNode `cnode_name` from application of calling module.
See `#{inspect(__MODULE__)}.start/2` for more details.
"""
defmacro start(native_name) do
app = MixHelper.get_app!(__CALLER__.module)
quote do
unquote(__MODULE__).start(unquote(app), unquote(native_name))
end
end
@doc """
Spawns and connects to CNode `cnode_name` from application `app`.
The CNode is passed the following command line arguments:
- host name,
- alive name,
- node name,
- creation number.
After CNode startup, these parameters should be passed to
[`ei_connect_xinit`](http://erlang.org/doc/man/ei_connect.html#ei_connect_xinit)
function, and CNode should be published and await connection. Once the CNode is
published, it should print a line starting with `ready` to the standard output
**and flush the standard output** to avoid the line being buffered.
Under the hood, this function starts an associated server, which is responsible
for monitoring the CNode and monitoring calling process to be able to do proper
cleanup upon a crash. On startup, the server does the following:
1. Makes current node distributed if it is not done yet (see `Node.start/3`).
1. Assigns CNode a unique name.
1. Starts CNode OS process using `Port.open/2`.
1. Waits (at most 5 seconds) until a line `ready` is printed out
(this line is captured and not forwarded to the stdout).
1. Connects to the CNode.
The Erlang cookie is passed using the BUNDLEX_ERLANG_COOKIE environment variable.
"""
@spec start_link(app :: atom, native_name :: atom) :: on_start_t
def start_link(app, native_name) do
do_start(app, native_name, true)
end
@doc """
Works the same way as `start_link/2`, but does not link to CNode's associated
server.
"""
@spec start(app :: atom, native_name :: atom) :: on_start_t
def start(app, native_name) do
do_start(app, native_name, false)
end
defp do_start(app, native_name, link?) do
{:ok, pid} =
GenServer.start(
__MODULE__.Server,
%{app: app, native_name: native_name, caller: self(), link?: link?}
)
receive do
{^pid, res} -> res
end
end
@doc """
Disconnects from CNode.
It is the responsibility of the CNode to exit upon connection loss.
"""
@spec stop(t) :: :ok | {:error, :disconnect_cnode}
def stop(%__MODULE__{server: server}) do
GenServer.call(server, :stop)
end
@doc """
Starts monitoring CNode from the calling process.
"""
@spec monitor(t) :: reference
def monitor(%__MODULE__{server: server}) do
Process.monitor(server)
end
@doc """
Makes a synchronous call to CNode and waits for its reply.
The CNode is supposed to send back a `{cnode, response}` tuple where `cnode`
is the node name of CNode. If the response doesn't come in within `timeout`,
error is raised.
Messages are exchanged directly (without interacting with CNode's associated
server).
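## Example
A minimal sketch (the app and native names are illustrative):
    {:ok, cnode} = Bundlex.CNode.start_link(:my_app, :my_native)
    response = Bundlex.CNode.call(cnode, :ping)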
"""
@spec call(t, message :: term, timeout :: non_neg_integer | :infinity) :: response :: term
def call(%__MODULE__{node: node}, message, timeout \\ 5000) do
Kernel.send({:any, node}, message)
receive do
{^node, response} -> response
after
timeout -> raise "Timeout upon call to the CNode #{inspect(node)}"
end
end
@doc """
Sends a message to the CNode.
The message is exchanged directly (without interacting with CNode's associated
server).
"""
@spec send(t, message :: term) :: :ok
def send(%__MODULE__{node: node}, message) do
Kernel.send({:any, node}, message)
:ok
end
end
defmodule DBux.Type do
@type simple_type_name :: :byte | :boolean | :int16 | :uint16 | :int32 | :uint32 | :int64 | :uint64 | :double | :string | :object_path | :signature | :unix_fd
@type container_type_name :: :array | :struct | :variant | :dict_entry
@type simple_type :: simple_type_name
@type container_type :: {container_type_name, [simple_type | container_type]}
@type t :: simple_type | container_type
@type list_of_types :: [] | [t]
@doc """
Returns bitstring that contains 1-byte D-Bus signature of given type.
Reverse function is `type/1`.
"""
@spec signature(simple_type | :variant) :: String.t
def signature(:byte), do: "y"
def signature(:boolean), do: "b"
def signature(:int16), do: "n"
def signature(:uint16), do: "q"
def signature(:int32), do: "i"
def signature(:uint32), do: "u"
def signature(:int64), do: "x"
def signature(:uint64), do: "t"
def signature(:double), do: "d"
def signature(:string), do: "s"
def signature(:object_path), do: "o"
def signature(:signature), do: "g"
def signature(:unix_fd), do: "h"
def signature(:variant), do: "v"
def signature(%DBux.Value{type: type}), do: signature(type)
@doc """
Returns atom that contains atom identifying type.
Reverse function is `signature/1`.
"""
@spec type(String.t) :: simple_type | :variant
def type("y"), do: :byte
def type("b"), do: :boolean
def type("n"), do: :int16
def type("q"), do: :uint16
def type("i"), do: :int32
def type("u"), do: :uint32
def type("x"), do: :int64
def type("t"), do: :uint64
def type("d"), do: :double
def type("s"), do: :string
def type("o"), do: :object_path
def type("g"), do: :signature
def type("h"), do: :unix_fd
def type("v"), do: :variant
@doc """
Parses signature in D-Bus format and returns it as a nested list in which
simple types are represented as atoms and container types as tuples.
For example, "yba{s(ui)}" will become `[:byte, :boolean, {:array, [{:dict, [:string, {:struct, [:uint32, :int32]}]}]}]`.
First of all, it is much more convenient to have such structure if you want
to recursively parse signature in Elixir, so it is used internally while
demarshalling messages. It can also serve as validator for signatures.
It returns `{:ok, list}` in case of success, `{:error, reason}` otherwise.
It does most of the checks from the specification, but it does not check
for dicts constraints at the moment.
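## Example
    iex> DBux.Type.type_from_signature("yba{s(ui)}")
    {:ok, [:byte, :boolean, {:array, [{:dict, [:string, {:struct, [:uint32, :int32]}]}]}]}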
"""
@spec type_from_signature(String.t) :: {:ok, list_of_types} | {:error, {:badsignature, atom}}
def type_from_signature(""), do: {:ok, []}
def type_from_signature(signature) when is_binary(signature) do
parse(signature, [])
end
@doc """
Returns alignment size for given D-Bus type.
"""
@spec align_size(simple_type_name | container_type_name) :: number
def align_size(:byte), do: 1
def align_size(:boolean), do: 4
def align_size(:int16), do: 2
def align_size(:uint16), do: 2
def align_size(:int32), do: 4
def align_size(:uint32), do: 4
def align_size(:int64), do: 8
def align_size(:uint64), do: 8
def align_size(:double), do: 8
def align_size(:string), do: 4
def align_size(:object_path), do: 4
def align_size(:signature), do: 1
def align_size(:array), do: 4
def align_size(:struct), do: 8
def align_size(:variant), do: 1
def align_size(:dict_entry), do: 8
def align_size(:unix_fd), do: 4
# Computes padding size for container types.
# It just takes container type, and ignores inner type.
@doc false
def compute_padding_size(length, type) when is_tuple(type) do
{subtype_major, _} = type
compute_padding_size(length, subtype_major)
end
# Computes padding size for a type, given data length and type name.
@doc false
def compute_padding_size(length, type) when is_atom(type) do
compute_padding_size(length, DBux.Type.align_size(type))
end
# Computes padding size for a type, given data length and target padding.
@doc false
def compute_padding_size(length, align) when is_number(align) do
padding = rem(length, align)
case padding do
0 -> 0
_ -> align - padding
end
end
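# Example (illustrative): a :string aligns to 4 bytes, so data ending at byte
# offset 6 needs 2 bytes of padding:
#
#     DBux.Type.compute_padding_size(6, :string)
#     #=> 2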
# ------ TOP LEVEL ------
# Top level: End of signature, return
defp parse(<< >>, acc) do
{:ok, acc}
end
# Top level: Enter inner recurrence for struct
defp parse(<< "(", rest :: binary >>, acc) do
case parse_struct(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Top level: Got struct closing token but it wasn't opened
defp parse(<< ")", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unmatchedstruct}}
end
# Top level: Attempt to enter inner recurrence for dict without enclosing array
defp parse(<< "{", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unwrappeddict}}
end
# Top level: Got dict closing token but it wasn't opened
defp parse(<< "}", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unmatcheddict}}
end
# Top level: Enter inner recurrence for array
defp parse(<< "a", rest :: binary >>, acc) do
case parse_array(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Top level: Simple types
defp parse(<< head :: binary-size(1), rest :: binary >>, acc) do
parse(rest, acc ++ [type(head)])
end
# ------ STRUCT ------
# Within struct: Enter inner recurrence for another struct
defp parse_struct(<< "(", rest :: binary >>, acc) do
case parse_struct(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse_struct(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Within struct: Closing empty struct
defp parse_struct(<< ")", _rest :: binary >>, []) do
{:error, {:badsignature, :emptystruct}}
end
# Within struct: Closing non-empty struct, return
defp parse_struct(<< ")", rest :: binary >>, acc) do
{:ok, {:struct, acc}, rest}
end
# Within struct: Attempt to enter inner recurrence for dict without enclosing array
defp parse_struct(<< "{", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unwrappeddict}}
end
# Within struct: Got dict closing token but it wasn't opened
defp parse_struct(<< "}", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unmatcheddict}}
end
# Within struct: Enter inner recurrence for array
defp parse_struct(<< "a", rest :: binary >>, acc) do
case parse_array(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse_struct(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Within struct: Struct has no contents
defp parse_struct(<< >>, _acc) do
{:error, {:badsignature, :unmatchedstruct}}
end
# Within struct: Simple types
defp parse_struct(<< head :: binary-size(1), rest :: binary >>, acc) do
parse_struct(rest, acc ++ [type(head)])
end
# ------ DICT ------
# Within dict: Attempt to enter inner recurrence for dict without enclosing array
defp parse_dict(<< "{", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unwrappeddict}}
end
# Within dict: Closing empty dict
defp parse_dict(<< "}", _rest :: binary >>, []) do
{:error, {:badsignature, :emptydict}}
end
# Within dict: Closing non-empty dict, return
defp parse_dict(<< "}", rest :: binary >>, acc) do
{:ok, {:dict, acc}, rest}
end
# Within dict: Enter inner recurrence for struct
defp parse_dict(<< "(", rest :: binary >>, acc) do
case parse_struct(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse_dict(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Within dict: Got struct closing token but it wasn't opened
defp parse_dict(<< ")", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unmatchedstruct}}
end
# Within dict: Enter inner recurrence for array
defp parse_dict(<< "a", rest :: binary >>, acc) do
case parse_array(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse_dict(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Within dict: Dict has no contents
defp parse_dict(<< >>, _acc) do
{:error, {:badsignature, :unmatcheddict}}
end
# Within dict: Simple types
defp parse_dict(<< head :: binary-size(1), rest :: binary >>, acc) do
parse_dict(rest, acc ++ [type(head)])
end
# ------ ARRAY ------
# Within array: Enter inner recurrence for struct
defp parse_array(<< "(", rest :: binary >>, acc) do
case parse_struct(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
{:ok, {:array, acc ++ [value_parsed]}, rest_after_parse}
{:error, reason} ->
{:error, reason}
end
end
# Within array: Enter inner recurrence for dict
defp parse_array(<< "{", rest :: binary >>, acc) do
case parse_dict(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
{:ok, {:array, acc ++ [value_parsed]}, rest_after_parse}
{:error, reason} ->
{:error, reason}
end
end
# Within array: Got struct closing token but it wasn't opened
defp parse_array(<< ")", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unmatchedstruct}}
end
# Within array: Got dict closing token but it wasn't opened
defp parse_array(<< "}", _rest :: binary >>, _acc) do
{:error, {:badsignature, :unmatcheddict}}
end
# Within array: Enter inner recurrence for another array
defp parse_array(<< "a", rest :: binary >>, acc) do
case parse_array(rest, []) do
{:ok, value_parsed, rest_after_parse} ->
parse_array(rest_after_parse, acc ++ [value_parsed])
{:error, reason} ->
{:error, reason}
end
end
# Within array: Empty array
defp parse_array(<< >>, []) do
{:error, {:badsignature, :emptyarray}}
end
# Within array: Container types, return
defp parse_array(<< >>, acc) do
{:ok, {:array, acc}, << >>}
end
# Within array: Simple types, return
defp parse_array(<< head :: binary-size(1), rest :: binary >>, acc) do
{:ok, {:array, acc ++ [type(head)]}, rest}
end
end
defmodule USGovData.Parsers.CommitteeMaster do
defstruct([
:address1,
:address2,
:candidate,
:city,
:connected_org,
:designation,
:filing_frequency,
:id,
:name,
:org_category,
:party,
:state,
:treasurer,
:type,
:zip_code
])
@type committee_designation ::
:candidate_authorized
| :lobbyist_pac
| :leadership_pac
| :joint_fundraiser
| :principal_committee
| :unauthorized
| {:unknown_code, String.t()}
@type filing_frequency ::
:admin_terminated
| :debt
| :monthly
| :quarterly
| :terminated
| :waived
| {:unknown_frequency, String.t()}
@type org_category ::
:corp
| :labor
| :membership
| :trade_assoc
| :coop
| :corp_no_cap_stock
| {:unknown_category, String.t()}
@type t :: %__MODULE__{
address1: String.t(),
address2: String.t(),
candidate: String.t(),
city: String.t(),
connected_org: String.t(),
designation: committee_designation() | nil,
filing_frequency: filing_frequency() | nil,
id: String.t(),
name: String.t(),
org_category: org_category() | nil,
party: String.t(),
state: String.t(),
treasurer: String.t(),
type: String.t(),
zip_code: String.t()
}
@doc """
Parses a line from a committee master FEC data file
"""
@spec parse_line(line :: String.t()) :: {:ok, __MODULE__.t()} | {:error, atom}
def parse_line(line) do
case :csv_parser.scan_and_parse(line) do
{:ok, fields} ->
case length(fields) do
15 ->
%__MODULE__{
address1: Enum.at(fields, name2off(:address1)),
address2: Enum.at(fields, name2off(:address2)),
candidate: Enum.at(fields, name2off(:candidate)),
city: Enum.at(fields, name2off(:city)),
connected_org: Enum.at(fields, name2off(:connected_org)),
designation: Enum.at(fields, name2off(:designation)),
filing_frequency: Enum.at(fields, name2off(:filing_frequency)),
id: Enum.at(fields, name2off(:id)),
name: Enum.at(fields, name2off(:name)),
org_category: Enum.at(fields, name2off(:org_category)),
party: Enum.at(fields, name2off(:party)),
state: Enum.at(fields, name2off(:state)),
treasurer: Enum.at(fields, name2off(:treasurer)),
type: Enum.at(fields, name2off(:type)),
zip_code: Enum.at(fields, name2off(:zip_code))
}
|> validate
_ ->
{:error, :bad_field_count}
end
error ->
error
end
end
defp name2off(:id), do: 0
defp name2off(:name), do: 1
defp name2off(:treasurer), do: 2
defp name2off(:address1), do: 3
defp name2off(:address2), do: 4
defp name2off(:city), do: 5
defp name2off(:state), do: 6
defp name2off(:zip_code), do: 7
defp name2off(:designation), do: 8
defp name2off(:type), do: 9
defp name2off(:party), do: 10
defp name2off(:filing_frequency), do: 11
defp name2off(:org_category), do: 12
defp name2off(:connected_org), do: 13
defp name2off(:candidate), do: 14
defp translate_designation("A"), do: :candidate_authorized
defp translate_designation("B"), do: :lobbyist_pac
defp translate_designation("D"), do: :leadership_pac
defp translate_designation("J"), do: :joint_fundraiser
defp translate_designation("P"), do: :principal_committee
defp translate_designation("U"), do: :unauthorized
defp translate_designation(nil), do: nil
defp translate_designation(d) when is_binary(d), do: {:unknown_code, d}
defp translate_frequency("A"), do: :admin_terminated
defp translate_frequency("D"), do: :debt
defp translate_frequency("M"), do: :monthly
defp translate_frequency("Q"), do: :quarterly
defp translate_frequency("T"), do: :terminated
defp translate_frequency("W"), do: :waived
defp translate_frequency(nil), do: nil
defp translate_frequency(f) when is_binary(f), do: {:unknown_frequency, f}
defp translate_category("C"), do: :corp
defp translate_category("L"), do: :labor
defp translate_category("M"), do: :membership
defp translate_category("T"), do: :trade_assoc
defp translate_category("V"), do: :coop
defp translate_category("W"), do: :corp_no_cap_stock
defp translate_category(nil), do: nil
defp translate_category(c) when is_binary(c), do: {:unknown_category, c}
defp validate(%__MODULE__{address2: add2} = r) when is_integer(add2) do
validate(%{r | address2: "#{add2}"})
end
defp validate(%__MODULE__{zip_code: zc} = r) when is_integer(zc) do
validate(%{r | zip_code: "#{zc}"})
end
defp validate(
%__MODULE__{designation: d, filing_frequency: f, org_category: c, address2: add2} = r
) do
updated_d = translate_designation(d)
updated_f = translate_frequency(f)
updated_c = translate_category(c)
updated_add2 =
if add2 != nil and String.length(add2) < 2 do
nil
else
add2
end
{:ok,
%{
r
| designation: updated_d,
filing_frequency: updated_f,
org_category: updated_c,
address2: updated_add2
}}
end
end
defmodule CaboCha do
@moduledoc """
Elixir bindings for CaboCha, a Japanese dependency structure analyzer.
The `parse` function returns, for each sentence, a list of maps.
The meaning of the map keys is as follows.
- `chunk`: 文節(Chunk) -- This is a map which includes the following.
+ `id`: 文節id(Chunk id)
+ `link`: 係り先の文節id(Linked chunk id)
+ `rel`: 不明(Unknown)
+ `head`: 主辞の形態素id(Head morpheme id)
+ `func`: 機能語の形態素id(Function word morpheme id)
+ `score`: 係り関係のスコア(Relational score)
- `morphs`: 文節の中の単語の形態素のリスト(List of morphemes in the chunk) -- The list contains maps which include the following.
+ `id`: 形態素id(Morpheme id)
+ `surface`: 表層形(Surface)
+ `pos`: 品詞(Part of speech)
+ `pos1`: 品詞細分類1(Part of speech subcategory 1)
+ `pos2`: 品詞細分類2(Part of speech subcategory 2)
+ `pos3`: 品詞細分類3(Part of speech subcategory 3)
+ `conjugation_form`: 活用型(Conjugation type)
+ `conjugation`: 活用形(Conjugated form)
+ `base`: 基本形・原形(Lexical form)
+ `yomi`: 読み(Reading)
+ `pronunciation`: 発音(Pronunciation)
"""
@doc """
Parses the given string and returns CaboCha's parse result as a list.
If `read_from_file` is true, `str` is treated as a filename and that file is parsed instead.
## Examples
```elixir
iex> CaboCha.parse("太郎は花子が読んでいる本を次郎に渡した")
[
[
%{
"chunk" => %{
"func" => 1,
"head" => 0,
"id" => 0,
"link" => 5,
"rel" => "D",
"score" => -0.742128
},
"morphs" => [
%{
"base" => "太郎",
"conjugation" => "",
"conjugation_form" => "",
"id" => 0,
"pos" => "名詞",
"pos1" => "固有名詞",
"pos2" => "人名",
"pos3" => "名",
"pronunciation" => "タロー",
"surface" => "太郎",
"yomi" => "タロウ"
},
%{
"base" => "は",
"conjugation" => "",
"conjugation_form" => "",
"id" => 1,
"pos" => "助詞",
"pos1" => "係助詞",
"pos2" => "",
"pos3" => "",
"pronunciation" => "ワ",
"surface" => "は",
"yomi" => "ハ"
}
]
},
%{
"chunk" => %{
"func" => 3,
"head" => 2,
"id" => 1,
"link" => 2,
"rel" => "D",
"score" => 1.700175
},
"morphs" => [
%{
"base" => "花子",
"conjugation" => "",
"conjugation_form" => "",
"id" => 2,
"pos" => "名詞",
"pos1" => "固有名詞",
"pos2" => "人名",
"pos3" => "名",
"pronunciation" => "ハナコ",
"surface" => "花子",
"yomi" => "ハナコ"
},
%{
"base" => "が",
"conjugation" => "",
"conjugation_form" => "",
"id" => 3,
"pos" => "助詞",
"pos1" => "格助詞",
"pos2" => "一般",
"pos3" => "",
"pronunciation" => "ガ",
"surface" => "が",
"yomi" => "ガ"
}
]
},
%{
"chunk" => %{
"func" => 6,
"head" => 4,
"id" => 2,
"link" => 3,
"rel" => "D",
"score" => 1.825021
},
"morphs" => [
%{
"base" => "読む",
"conjugation" => "連用タ接続",
"conjugation_form" => "五段・マ行",
"id" => 4,
"pos" => "動詞",
"pos1" => "自立",
"pos2" => "",
"pos3" => "",
"pronunciation" => "ヨン",
"surface" => "読ん",
"yomi" => "ヨン"
},
%{
"base" => "で",
"conjugation" => "",
"conjugation_form" => "",
"id" => 5,
"pos" => "助詞",
"pos1" => "接続助詞",
"pos2" => "",
"pos3" => "",
"pronunciation" => "デ",
"surface" => "で",
"yomi" => "デ"
},
%{
"base" => "いる",
"conjugation" => "基本形",
"conjugation_form" => "一段",
"id" => 6,
"pos" => "動詞",
"pos1" => "非自立",
"pos2" => "",
"pos3" => "",
"pronunciation" => "イル",
"surface" => "いる",
"yomi" => "イル"
}
]
},
%{
"chunk" => %{
"func" => 8,
"head" => 7,
"id" => 3,
"link" => 5,
"rel" => "D",
"score" => -0.742128
},
"morphs" => [
%{
"base" => "本",
"conjugation" => "",
"conjugation_form" => "",
"id" => 7,
"pos" => "名詞",
"pos1" => "一般",
"pos2" => "",
"pos3" => "",
"pronunciation" => "ホン",
"surface" => "本",
"yomi" => "ホン"
},
%{
"base" => "を",
"conjugation" => "",
"conjugation_form" => "",
"id" => 8,
"pos" => "助詞",
"pos1" => "格助詞",
"pos2" => "一般",
"pos3" => "",
"pronunciation" => "ヲ",
"surface" => "を",
"yomi" => "ヲ"
}
]
},
%{
"chunk" => %{
"func" => 11,
"head" => 10,
"id" => 4,
"link" => 5,
"rel" => "D",
"score" => -0.742128
},
"morphs" => [
%{
"base" => "次",
"conjugation" => "",
"conjugation_form" => "",
"id" => 9,
"pos" => "名詞",
"pos1" => "一般",
"pos2" => "",
"pos3" => "",
"pronunciation" => "ツギ",
"surface" => "次",
"yomi" => "ツギ"
},
%{
"base" => "郎",
"conjugation" => "",
"conjugation_form" => "",
"id" => 10,
"pos" => "名詞",
"pos1" => "一般",
"pos2" => "",
"pos3" => "",
"pronunciation" => "ロー",
"surface" => "郎",
"yomi" => "ロウ"
},
%{
"base" => "に",
"conjugation" => "",
"conjugation_form" => "",
"id" => 11,
"pos" => "助詞",
"pos1" => "格助詞",
"pos2" => "一般",
"pos3" => "",
"pronunciation" => "ニ",
"surface" => "に",
"yomi" => "ニ"
}
]
},
%{
"chunk" => %{
"func" => 13,
"head" => 12,
"id" => 5,
"link" => -1,
"rel" => "D",
"score" => 0.0
},
"morphs" => [
%{
"base" => "渡す",
"conjugation" => "連用形",
"conjugation_form" => "五段・サ行",
"id" => 12,
"pos" => "動詞",
"pos1" => "自立",
"pos2" => "",
"pos3" => "",
"pronunciation" => "ワタシ",
"surface" => "渡し",
"yomi" => "ワタシ"
},
%{
"base" => "た",
"conjugation" => "基本形",
"conjugation_form" => "特殊・タ",
"id" => 13,
"pos" => "助動詞",
"pos1" => "",
"pos2" => "",
"pos3" => "",
"pronunciation" => "タ",
"surface" => "た",
"yomi" => "タ"
}
]
}
]
]
```
"""
@spec parse(String.t, boolean()) :: [[Map.t, ...], ...]
def parse(str, read_from_file \\ false) do
command = case read_from_file do
true ->
"cabocha -f3 #{str}"
_ ->
"""
cat <<EOS.e42197de978c41367f46aa6429ed5c8e | cabocha -f3
#{str}
EOS.e42197de978c41367f46aa6429ed5c8e
"""
end
result = command
|> String.to_charlist
|> :os.cmd
|> List.to_string
|> String.trim
|> Floki.find("sentence")
result
|> Enum.map(fn {"sentence", [], info} ->
info
|> Enum.map(fn {_, chunk_info, toks} ->
chunk = Enum.reduce(chunk_info, %{}, fn({key, value}, acc) ->
case key do
"score" ->
Map.put(acc, key, String.to_float(value))
"rel" ->
Map.put(acc, key, value)
_ ->
Map.put(acc, key, String.to_integer(value))
end
end)
toks = create_toks(toks)
%{"chunk" => chunk, "morphs" => toks}
end)
end)
end
defp create_toks(toks) do
toks
|> Enum.map(fn {_, [{_, id}, {_, feature}], [surface]} ->
%{"id" => String.to_integer(id)}
|> Map.merge(%{"surface" => surface})
|> Map.merge(parse_feature(feature))
end)
end
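# Example (illustrative): for the morpheme 読ん the MeCab feature string
# "動詞,自立,*,*,五段・マ行,連用タ接続,読む,ヨン,ヨン" parses to
# %{"pos" => "動詞", "pos1" => "自立", "pos2" => "", "pos3" => "",
#   "conjugation_form" => "五段・マ行", "conjugation" => "連用タ接続",
#   "base" => "読む", "yomi" => "ヨン", "pronunciation" => "ヨン"}
# ("*" placeholders become empty strings).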
defp parse_feature(feature) do
Regex.named_captures(~r/
^
(?:
(?<pos>[^,]+),
\*?(?<pos1>[^,]*),
\*?(?<pos2>[^,]*),
\*?(?<pos3>[^,]*),
\*?(?<conjugation_form>[^,]*),
\*?(?<conjugation>[^,]*),
(?<base>[^,]*)
(?:
,(?<yomi>[^,]*)
,(?<pronunciation>[^,]*)
)?
)?
$
/x, feature)
end
end
defmodule Commands.GeneralCommands do
use Memoize
alias Interp.Functions
alias Interp.Interpreter
alias Interp.Stack
alias Interp.Globals
alias Interp.Environment
alias Interp.RecursiveEnvironment
alias HTTPoison
alias Commands.ListCommands
alias Commands.IntCommands
alias Commands.GeneralCommands
require Interp.Functions
def head(value) do
cond do
Functions.is_iterable(value) -> List.first Enum.to_list(Stream.take(value, 1))
is_integer(value) -> head(Functions.to_non_number(value))
true -> String.slice(value, 0..0)
end
end
def dehead(value) do
cond do
Functions.is_iterable(value) -> Stream.drop(value, 1) |> Stream.map(fn x -> x end)
true -> String.slice(to_string(value), 1..-1)
end
end
def tail(value) do
cond do
Functions.is_iterable(value) -> hd(Enum.slice(Enum.to_list(value), -1..-1))
is_integer(value) -> tail(Functions.to_non_number(value))
true -> String.slice(value, -1..-1)
end
end
def detail(value) do
cond do
Functions.is_iterable(value) ->
value |> Enum.reverse |> tl |> Enum.reverse |> Stream.map(fn x -> x end)
true -> String.slice(to_string(value), 0..-2)
end
end
def element_at(value, index) when index < 0, do: element_at(value, IntCommands.mod(index, length_of(value)))
def element_at(value, index) do
case value |> Stream.drop(index) |> Stream.take(1) |> Enum.to_list |> List.first do
nil ->
cond do
value |> length_of() == 0 -> value
true -> Stream.cycle(value) |> Stream.drop(index) |> Stream.take(1) |> Enum.to_list |> List.first
end
head -> head
end
end
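# Examples (illustrative): negative indices wrap around via mod, and indices
# past the end cycle through the list:
#
#     element_at([1, 2, 3], -1)  #=> 3
#     element_at([1, 2, 3], 4)   #=> 2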
def remove_from(value, filter_chars) do
filter_chars = Functions.to_str Functions.stream(filter_chars)
value = Functions.to_str(value)
cond do
Functions.is_iterable(value) -> value |> Stream.map(fn x -> remove_from(x, filter_chars) end)
true -> Enum.reduce(Enum.filter(String.graphemes(value), fn x -> not Enum.member?(filter_chars, Functions.to_str x) end), "", fn (element, acc) -> acc <> element end)
end
end
def vectorized_equals(a, b) do
cond do
Functions.is_iterable(a) and not Functions.is_iterable(b) -> a |> Stream.map(fn x -> vectorized_equals(x, b) end)
not Functions.is_iterable(a) and Functions.is_iterable(b) -> b |> Stream.map(fn x -> vectorized_equals(a, x) end)
Functions.is_iterable(a) and Functions.is_iterable(b) -> Stream.zip(a, b) |> Stream.map(fn {x, y} -> vectorized_equals(x, y) end)
true -> Functions.to_number(a) == Functions.to_number(b)
end
end
def equals(a, b) do
cond do
Functions.is_iterable(a) and not Functions.is_iterable(b) -> false
not Functions.is_iterable(a) and Functions.is_iterable(b) -> false
true -> Functions.eval(Functions.to_number(a)) == Functions.eval(Functions.to_number(b))
end
end
def all_equal(value) do
cond do
Functions.is_iterable(value) ->
case Enum.take(value, 1) do
[] -> true
element -> Enum.all?(value, fn x -> equals(x, hd(element)) end)
end
true ->
all_equal(String.graphemes(to_string(value)))
end
end
def count(value, element) when Functions.is_iterable(value), do: value |> Enum.count(fn x -> equals(x, element) end)
def count(value, element), do: count(value, element, 0)
defp count("", _, count), do: count
defp count(value, element, count), do: count(value |> String.slice(1..-1), element, count + Functions.to_number(value |> String.starts_with?(element)))
def strict_count(value, element) when not Functions.is_iterable(value) and Functions.is_iterable(element), do: element |> Stream.map(fn x -> count(value, x) end)
def strict_count(value, element) when not Functions.is_iterable(value), do: count(value, element)
def strict_count(value, element) when Functions.is_iterable(value), do: value |> Enum.count(fn x -> equals(x, element) end)
def enclose(value) do
cond do
Functions.is_iterable(value) -> Stream.concat(value, Stream.take(value, 1)) |> Stream.map(fn x -> x end)
true -> Functions.to_non_number(value) <> head(value)
end
end
def concat(a, b) do
cond do
Functions.is_iterable(a) and Functions.is_iterable(b) -> Stream.concat(a, b) |> Stream.map(fn x -> x end)
Functions.is_iterable(a) and not Functions.is_iterable(b) -> a |> Stream.map(fn x -> concat(x, b) end)
not Functions.is_iterable(a) and Functions.is_iterable(b) -> b |> Stream.map(fn x -> concat(a, x) end)
true -> to_string(a) <> to_string(b)
end
end
def length_of(a) do
cond do
Functions.is_iterable(a) -> length(Enum.to_list(a))
true -> String.length(to_string(a))
end
end
@doc """
Loop method. This method iteratively runs the given commands on the given index and the given range.
After each iteration, the resulting stack and environment are carried over into the next one.
## Parameters
- commands: A list of commands that the program will run on.
- stack: A Stack object which contains the current state of the stack.
- environment: The environment in which the program will be run in.
- index: The current index of the loop iteration.
- range: The range of the loop. If the range is an integer, the loop will run from n <- index..range
If the range of the loop is a string or a list, it will iterate over each element in the given range.
"""
def loop(commands, stack, environment, index, range) do
case Globals.get().status do
:ok ->
cond do
# If the range is an integer and the index is in bounds, run the commands
# and increment the index by 1 on the next iteration.
(is_integer(range) and index <= range) or range == :infinity ->
{new_stack, new_env} = Interpreter.interp(commands, stack, %{environment | range_variable: index})
loop(commands, new_stack, new_env, index + 1, range)
# If the range is a list/stream/map, take the first element after 'index' elements
# and check if the current index is in bounds (i.e. curr_element != []).
Functions.is_iterable(range) ->
curr_element = range |> Stream.drop(index) |> Stream.take(1) |> Enum.to_list
case curr_element do
[] -> {stack, environment}
x ->
{new_stack, new_env} = Interpreter.interp(commands, stack, %{environment | range_variable: index, range_element: hd(x)})
loop(commands, new_stack, new_env, index + 1, range)
end
# If the range is a string, convert to a list of strings and loop on that.
is_bitstring(range) ->
loop(commands, stack, environment, index, String.graphemes(range))
# If none of the above applies, that means that the index is out of bounds and
# we will return the final state of the stack and the environment.
true ->
{stack, environment}
end
:break ->
Globals.set(%{Globals.get() | status: :ok})
{stack, environment}
:quit -> {stack, environment}
end
end
def run_while(prev_result, commands, environment, index, prev_results \\ nil) do
{result_stack, new_env} = Interpreter.interp(commands, %Stack{elements: [prev_result]}, %{environment | range_variable: index, range_element: prev_result})
{result, _, new_env} = Stack.pop(result_stack, new_env)
cond do
GeneralCommands.equals(prev_result, result) and prev_results == nil -> {result, new_env}
GeneralCommands.equals(prev_result, result) -> {prev_results |> Enum.reverse, new_env}
prev_results == nil -> run_while(result, commands, new_env, index + 1)
true -> run_while(result, commands, new_env, index + 1, [result | prev_results])
end
end
def counter_loop(commands, stack, environment, index, count) do
case Globals.get().status do
:ok ->
cond do
Globals.get().counter_variable >= count -> {stack, environment}
true ->
{result_stack, new_env} = Interpreter.interp(commands, stack, %{environment | range_variable: index})
counter_loop(commands, result_stack, new_env, index + 1, count)
end
:break ->
Globals.set(%{Globals.get() | status: :ok})
{stack, environment}
:quit -> {stack, environment}
end
end
defmemo recursive_program(commands, base_cases, n) do
cond do
n < 0 -> 0
n < length(base_cases) -> Enum.at(base_cases, n)
true ->
{stack, new_env} = Interpreter.interp(commands, %Stack{elements: []}, %Environment{range_variable: n, recursive_environment: %RecursiveEnvironment{subprogram: commands, base_cases: base_cases}})
{head, _, _} = Stack.pop(stack, new_env)
head
end
end
def map_every(commands, environment, list, nth) do
cond do
Functions.is_iterable(nth) ->
list
|> Stream.with_index(nth |> Stream.take(1) |> Enum.to_list |> List.first)
|> Stream.transform({nth |> Stream.cycle, 0}, fn ({x, index}, {nth, offset}) ->
head = nth |> Stream.take(1) |> Enum.to_list |> List.first
cond do
head == 0 -> {[x], {nth, offset}}
index - offset == head -> {[Interpreter.flat_interp(commands, [x], environment)], {nth |> Stream.drop(1), index}}
true -> {[x], {nth, offset}}
end
end) |> Stream.map(fn x -> x end)
true ->
list |> Stream.map_every(nth, fn x -> Interpreter.flat_interp(commands, [x], environment) end)
end
end
def get_url(url) do
cond do
url |> String.starts_with?("http") -> HTTPoison.get!(url).body
true -> HTTPoison.get!("http://" <> url).body
end
end
def starts_with(left, right) when Functions.is_iterable(left) and Functions.is_iterable(right) do
cond do
equals(left |> Stream.take(length(Enum.to_list(right))) |> Enum.to_list, right) -> true
true -> false
end
end
def starts_with(left, right) when Functions.is_iterable(left), do: left |> Stream.map(fn x -> x |> starts_with(right) end)
def starts_with(left, right) when Functions.is_iterable(right), do: right |> Stream.map(fn x -> left |> starts_with(x) end)
def starts_with(left, right), do: String.starts_with?(to_string(left), to_string(right))
def ends_with(left, right) when Functions.is_iterable(left) and Functions.is_iterable(right), do: starts_with(left |> ListCommands.reverse, right |> ListCommands.reverse)
def ends_with(left, right) when Functions.is_iterable(left), do: starts_with(left |> Stream.map(&ListCommands.reverse/1), right |> ListCommands.reverse)
def ends_with(left, right) when Functions.is_iterable(right), do: starts_with(left |> ListCommands.reverse, right |> Stream.map(&ListCommands.reverse/1))
def ends_with(left, right), do: starts_with(left |> ListCommands.reverse, right |> ListCommands.reverse)
end
defmodule Krakex do
@moduledoc """
Kraken API Client.
The Kraken API is divided into several sections:
## Public market data
* `server_time/1` - Get server time.
* `assets/2` - Get asset info.
* `asset_pairs/2` - Get tradable asset pairs.
* `ticker/2` - Get ticker information.
* `ohlc/3` - Get OHLC data.
* `depth/3` - Get order book.
* `trades/3` - Get recent trades.
* `spread/3` - Get recent spread data.
## Private user data
* `balance/1` - Get account balance.
* `trade_balance/2` - Get trade balance.
* `open_orders/2` - Get open orders.
* `closed_orders/2` - Get closed orders.
* `query_orders/3` - Query orders info.
* `trades_history/3` - Get trades history.
* `query_trades/3` - Query trades info.
* `open_positions/3` - Get open positions.
* `ledgers/3` - Get ledgers info.
* `query_ledgers/2` - Query ledgers.
* `trade_volume/2` - Get trade volume.
## Private user trading
* `add_order/6` - Add standard order.
* `cancel_order/2` - Cancel open order.
## Private user funding
* `deposit_methods/3` - Get deposit methods.
* `deposit_addresses/4` - Get deposit addresses.
* `deposit_status/4` - Get status of recent deposits.
* `withdraw_info/5` - Get withdrawal information.
* `withdraw/5` (not implemented) - Withdraw funds.
* `withdraw_status/3` (not implemented) - Get status of recent withdrawals.
* `withdraw_cancel/4` (not implemented) - Request withdrawal cancelation.
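## Usage
A minimal sketch (uses the default configured clients; the asset pair name is
illustrative):
    {:ok, %{"unixtime" => time}} = Krakex.server_time()
    {:ok, ticker} = Krakex.ticker(["XBTEUR"])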
"""
alias Krakex.Client
@api Application.get_env(:krakex, :api_mod, Krakex.API)
@doc """
Get server time.
This is to aid in approximating the skew time between the server and client.
Returns a map with the fields:
* `"unixtime"` - as unix timestamp.
* `"rfc1123"` - as RFC 1123 time format.
## Example response:
{:ok, %{"rfc1123" => "Thu, 4 Jan 18 14:57:58 +0000", "unixtime" => 1515077878}}
"""
@spec server_time(Client.t()) :: Krakex.API.response()
def server_time(client \\ @api.public_client()) do
@api.public_request(client, "Time")
end
@doc """
Get asset info.
Takes the following keyword options:
* `:info` - info to retrieve. `"info"` (default)
* `:aclass` - asset class. `"currency"` (default)
* `:asset` - list of assets to get info on. Returns all (default)
Returns a map of asset names and a map of their info with the fields:
* `"altname"` - alternate name.
* `"aclass"` - asset class.
* `"decimals"` - scaling decimal places for record keeping.
* `"display_decimals"` - scaling decimal places for output display.
## Example response:
{:ok, %{"BCH" => %{"aclass" => "currency", "altname" => "BCH",
"decimals" => 10, "display_decimals" => 5}}}
"""
@spec assets(Client.t(), keyword) :: Krakex.API.response()
def assets(client \\ @api.public_client(), opts \\ [])
def assets(%Client{} = client, opts) when is_list(opts) do
@api.public_request(client, "Assets", opts)
end
def assets(opts, []) do
@api.public_request(@api.public_client(), "Assets", opts)
end
@doc """
Get tradable asset pairs.
Takes the following keyword options:
* `:info` - info to retrieve.
* `"info"` - all info (default).
* `"leverage"` - leverage info.
* `"fees"` - fees schedule.
* `"margin"` - margin info.
* `:pair` - list of asset pairs to get info on. Returns all (default)
Returns a map of asset pairs and a map of their info with the fields:
* `"altname"` - alternate pair name.
* `"aclass_base"` - asset class of base component.
* `"base"` - asset id of base component.
* `"aclass_quote"` - asset class of quote component.
* `"quote"` - asset id of quote component.
* `"lot"` - volume lot size.
* `"pair_decimals"` - scaling decimal places for pair.
* `"lot_decimals"` - scaling decimal places for volume.
* `"lot_multiplier"` - amount to multiply lot volume by to get currency volume.
* `"leverage_buy"` - array of leverage amounts available when buying.
* `"leverage_sell"` - array of leverage amounts available when selling.
* `"fees"` - fee schedule array in [volume, percent fee] tuples.
* `"fees_maker"` - maker fee schedule array in [volume, percent fee] tuples (if on maker/taker).
* `"fee_volume_currency"` - volume discount currency.
* `"margin_call"` - margin call level.
* `"margin_stop"` - stop-out/liquidation margin level.
## Example response:
{:ok, %{"BCHEUR" => %{"aclass_base" => "currency", "aclass_quote" => "currency",
"altname" => "BCHEUR", "base" => "BCH", "fee_volume_currency" => "ZUSD",
"fees" => [[0, 0.26], [50000, 0.24], [100000, 0.22], [250000, 0.2],
[500000, 0.18], [1000000, 0.16], [2500000, 0.14], [5000000, 0.12],
[10000000, 0.1]],
"fees_maker" => [[0, 0.16], [50000, 0.14], [100000, 0.12], [250000, 0.1],
[500000, 0.08], [1000000, 0.06], [2500000, 0.04], [5000000, 0.02],
[10000000, 0]], "leverage_buy" => [], "leverage_sell" => [],
"lot" => "unit", "lot_decimals" => 8, "lot_multiplier" => 1,
"margin_call" => 80, "margin_stop" => 40, "pair_decimals" => 1,
"quote" => "ZEUR"}}
"""
@spec asset_pairs(Client.t(), keyword) :: Krakex.API.response()
def asset_pairs(client \\ @api.public_client(), opts \\ [])
def asset_pairs(%Client{} = client, opts) when is_list(opts) do
@api.public_request(client, "AssetPairs", opts)
end
def asset_pairs(opts, []) do
@api.public_request(@api.public_client(), "AssetPairs", opts)
end
@doc """
Get ticker information.
Takes list of asset pairs to get info on.
Returns a map of asset pairs and a map of their ticker info with the fields:
* `"a"` - ask array(_price_, _whole lot volume_, _lot volume_).
* `"b"` - bid array(_price_, _whole lot volume_, _lot volume_).
* `"c"` - last trade closed array(_price_, _lot volume_).
* `"v"` - volume array(_today_, _last 24 hours_).
* `"p"` - volume weighted average price array(_today_, _last 24 hours_).
* `"t"` - number of trades array(_today_, _last 24 hours_).
* `"l"` - low array(_today_, _last 24 hours_).
* `"h"` - high array(_today_, _last 24 hours_).
* `"o"` - today's opening price.
## Example response:
{:ok,
%{"BCHEUR" => %{"a" => ["2034.800000", "1", "1.000"],
"b" => ["2025.000000", "8", "8.000"], "c" => ["2025.000000", "0.03660000"],
"h" => ["2140.000000", "2227.600000"],
"l" => ["1942.000000", "1942.000000"], "o" => "2134.000000",
"p" => ["2021.440397", "2051.549114"], "t" => [3824, 6704],
"v" => ["1956.76538027", "4086.36386115"]}}}
"""
@spec ticker(Client.t(), [binary]) :: Krakex.API.response()
def ticker(client \\ @api.public_client(), pairs) when is_list(pairs) do
@api.public_request(client, "Ticker", pair: pairs)
end
@doc """
Get OHLC data.
An open-high-low-close chart is a type of chart typically used to illustrate movements
in the price of a financial instrument over time. Each vertical line on the chart shows
the price range (the highest and lowest prices) over one unit of time, e.g., one day or
one hour.
Takes an asset pair and the following keyword options:
* `:interval` - time frame interval in minutes. 1 (default), 5, 15, 30, 60, 240, 1440, 10080, 21600
* `:since` - return committed OHLC data since given id (exclusive).
Returns a map with the asset pair and a list of lists with the entries (_time_, _open_, _high_,
_low_, _close_, _vwap_, _volume_, _count_) and:
* `"last"` - id to be used as since when polling for new, committed OHLC data.
Note: the last entry in the OHLC array is for the current, not-yet-committed frame and will
always be present, regardless of the value of `:since`.
## Example response:
{:ok,
%{"BCHEUR" => [[1515037200, "2051.7", "2051.7", "2051.7", "2051.7", "0.0", "0.00000000", 0],
[1515037260, "2051.7", "2051.7", "2045.0", "2045.0", "2045.0", "0.01500000", 1],
[1515037320, "2045.0", "2050.8", "2045.0", "2050.8", "2050.7", "2.37135868", 2],
[1515037380, "2050.8", "2050.8", "2050.8", "2050.8", "0.0", "0.00000000", 0],
...],
"last" => 1515080280}}
"""
@spec ohlc(Client.t(), binary, keyword) :: Krakex.API.response()
def ohlc(client \\ @api.public_client(), pair, opts \\ [])
def ohlc(%Client{} = client, pair, opts) when is_list(opts) do
@api.public_request(client, "OHLC", [pair: pair] ++ opts)
end
def ohlc(pair, opts, []) do
@api.public_request(@api.public_client(), "OHLC", [pair: pair] ++ opts)
end
@doc """
Get order book.
Returns the market depth for an asset pair.
Takes an asset pair and the following keyword options:
* `:count` - maximum number of asks/bids.
Returns a map of the asset pair and a map of the info with the fields:
* `"asks"` - ask side array of array entries (_price_, _volume_, _timestamp_).
* `"bids"` - bid side array of array entries (_price_, _volume_, _timestamp_).
## Example response:
{:ok,
%{"BCHEUR" => %{"asks" => [["2033.900000", "4.937", 1515082275],
["2034.000000", "0.548", 1515081910],
["2034.500000", "0.005", 1515081281],
["2034.800000", "4.637", 1515082048]],
"bids" => [["2025.000000", "1.024", 1515081702],
["2022.200000", "0.140", 1515078885],
["2022.100000", "0.280", 1515078852],
["2021.400000", "0.248", 1515080222]]}}}
"""
@spec depth(Client.t(), binary, keyword) :: Krakex.API.response()
def depth(client \\ @api.public_client(), pair, opts \\ [])
def depth(%Client{} = client, pair, opts) do
@api.public_request(client, "Depth", [pair: pair] ++ opts)
end
def depth(pair, opts, []) do
@api.public_request(@api.public_client(), "Depth", [pair: pair] ++ opts)
end
@doc """
Get recent trades.
Returns the trade data for an asset pair.
Takes an asset pair and the following keyword options:
* `:since` - return committed OHLC data since given id (exclusive).
Returns a map with the asset pair and a list of lists with the entries (_price_, _volume_, _time_,
_buy/sell_, _market/limit_, _miscellaneous_) and:
* `"last"` - id to be used as since when polling for new trade data.
## Example response:
{:ok,
%{"BCHEUR" => [["2008.100000", "0.09000000", 1515066097.1379, "b", "m", ""],
["2008.200000", "0.24850000", 1515066097.1663, "b", "m", ""],
["2008.300000", "4.36233575", 1515066097.1771, "b", "m", ""],
["2005.000000", "0.04107303", 1515066117.0598, "s", "l", ""],
["2008.000000", "0.07700000", 1515066117.389, "b", "l", ""],
"last" => "1515076587511702121"}}
"""
@spec trades(Client.t(), binary, keyword) :: Krakex.API.response()
def trades(client \\ @api.public_client(), pair, opts \\ [])
def trades(%Client{} = client, pair, opts) do
@api.public_request(client, "Trades", [pair: pair] ++ opts)
end
def trades(pair, opts, []) do
@api.public_request(@api.public_client(), "Trades", [pair: pair] ++ opts)
end
@doc """
Get recent spread data.
Returns the spread data for an asset pair.
Takes an asset pair and the following keyword options:
* `:since` - return spread data since given id (inclusive).
Returns a map with the asset pair and a list of lists with the entries
(_time_, _bid_, _ask_) and:
* `"last"` - id to be used as since when polling for new trade data.
## Example response:
{:ok,
%{"BCHEUR" => [[1515079584, "2025.000000", "2025.000000"],
[1515079584, "2025.000000", "2036.100000"],
[1515079594, "2025.000000", "2025.000000"],
[1515079596, "2025.000000", "2026.000000"],
[1515080461, "2025.500000", "2034.100000"],
[1515080462, "2025.000000", "2034.100000"]],
"last" => 1515083299}}
"""
@spec spread(Client.t(), binary, keyword) :: Krakex.API.response()
def spread(client \\ @api.public_client(), pair, opts \\ [])
def spread(%Client{} = client, pair, opts) do
@api.public_request(client, "Spread", [pair: pair] ++ opts)
end
def spread(pair, opts, []) do
@api.public_request(@api.public_client(), "Spread", [pair: pair] ++ opts)
end
@doc """
Get account balance.
Returns a map with the asset names and balance amount.
## Example response:
{:ok, %{"XXBT" => "0.0400000000", "XXRP" => "160.00000000", "ZEUR" => "67.6613"}}
"""
@spec balance(Client.t()) :: Krakex.API.response()
def balance(client \\ @api.private_client()) do
@api.private_request(client, "Balance")
end
@doc """
Get trade balance.
Takes the following keyword options:
* `:aclass` - asset class. `"currency"` (default)
* `:asset` - base asset used to determine balance. `"ZUSD"` (default)
Returns a map with the fields:
* `"eb"` - equivalent balance (combined balance of all currencies).
* `"tb"` - trade balance (combined balance of all equity currencies).
* `"m"` - margin amount of open positions.
* `"n"` - unrealized net profit/loss of open positions.
* `"c"` - cost basis of open positions.
* `"v"` - current floating valuation of open positions.
* `"e"` - equity = trade balance + unrealized net profit/loss.
* `"mf"` - free margin = equity - initial margin (maximum margin available to open new positions).
* `"ml"` - margin level = (equity / initial margin) * 100.
Note: Rates used for the floating valuation are the midpoint of the best bid and ask prices.
## Example response:
{:ok,
%{"c" => "0.0000", "e" => "725.4974", "eb" => "1177.9857", "m" => "0.0000",
"mf" => "725.4974", "n" => "0.0000", "tb" => "725.4974", "v" => "0.0000"}}
"""
@spec trade_balance(Client.t(), keyword) :: Krakex.API.response()
def trade_balance(client \\ @api.private_client(), opts \\ [])
def trade_balance(%Client{} = client, opts) do
@api.private_request(client, "TradeBalance", opts)
end
def trade_balance(opts, []) do
@api.private_request(@api.private_client(), "TradeBalance", opts)
end
@doc """
Get open orders.
Takes the following keyword options:
* `:trades` - whether or not to include trades in output. `false` (default)
* `:userref` - restrict results to given user reference id.
Returns a map with the txid as the key and the value is a map with the fields:
* `"refid"` - Referral order transaction id that created this order.
* `"userref"` - user reference id.
* `"status"` - status of order:
* `"pending"` - order pending book entry.
* `"open"` - open order.
* `"closed"` - closed order.
* `"canceled"` - order cancelled.
* `"expired"` - order expired.
* `"opentm"` - unix timestamp of when order was placed.
* `"starttm"` - unix timestamp of order start time (or 0 if not set).
* `"expiretm"` - unix timestamp of order end time (or 0 if not set).
* `"descr"` - order description info:
* `"pair"` - asset pair.
* `"type"` - type of order (buy/sell).
* `"ordertype"` - order type (See Add standard order).
* `"price"` - primary price.
* `"price2"` - secondary price.
* `"leverage"` - amount of leverage.
* `"order"` - order description.
* `"close"` - conditional close order description (if conditional close set).
* `"vol"` - volume of order (base currency unless viqc set in oflags).
* `"vol_exec"` - volume executed (base currency unless viqc set in oflags).
* `"cost"` - total cost (quote currency unless unless viqc set in oflags).
* `"fee"` - total fee (quote currency).
* `"price"` - average price (quote currency unless viqc set in oflags).
* `"stopprice"` - stop price (quote currency, for trailing stops).
* `"limitprice"` - triggered limit price (quote currency, when limit based order type triggered).
* `"misc"` - comma delimited list of miscellaneous info:
* `"stopped"` - triggered by stop price.
* `"touched"` - triggered by touch price.
* `"liquidated"` - liquidation.
* `"partial"` - partial fill.
* `"oflags"` - comma delimited list of order flags:
* `"viqc"` - volume in quote currency.
* `"fcib"` - prefer fee in base currency (default if selling).
* `"fciq"` - prefer fee in quote currency (default if buying).
* `"nompp"` - no market price protection.
* `"trades"` - array of trade ids related to order (if trades info requested and data available).
Note: Unless otherwise stated, costs, fees, prices, and volumes are in the asset pair's
scale, not the currency's scale. For example, if the asset pair uses a lot size that has a
scale of 8, the volume will use a scale of 8, even if the currency it represents only has a
scale of 2. Similarly, if the asset pair's pricing scale is 5, the scale will remain as 5,
even if the underlying currency has a scale of 8.
## Example response:
{:ok,
%{
"open" => %{
"OVAQ4T-WFN4B-J246BW" => %{
"cost" => "0.00000000",
"descr" => %{
"close" => "",
"leverage" => "none",
"order" => "sell 100.00000000 XRPEUR @ limit 1.55000",
"ordertype" => "limit",
"pair" => "XRPEUR",
"price" => "1.55000",
"price2" => "0",
"type" => "sell"
},
"expiretm" => 0,
"fee" => "0.00000000",
"limitprice" => "0.00000000",
"misc" => "",
"oflags" => "fciq",
"opentm" => 1516957593.9522,
"price" => "0.00000000",
"refid" => nil,
"starttm" => 0,
"status" => "open",
"stopprice" => "0.00000000",
"userref" => 0,
"vol" => "100.00000000",
"vol_exec" => "0.00000000"
}
}
}}
"""
@spec open_orders(Client.t(), keyword) :: Krakex.API.response()
def open_orders(client \\ @api.private_client(), opts \\ [])
def open_orders(%Client{} = client, opts) when is_list(opts) do
@api.private_request(client, "OpenOrders", opts)
end
def open_orders(opts, []) do
@api.private_request(@api.private_client(), "OpenOrders", opts)
end
@doc """
Get closed orders.
Takes the following keyword options:
* `:trades` - whether or not to include trades in output. `false` (default)
* `:userref` - restrict results to given user reference id.
* `:start` - starting unix timestamp or order tx id of results (exclusive).
* `:end` - ending unix timestamp or order tx id of results (inclusive).
* `:ofs` - result offset.
* `:closetime` - which time to use.
* `"open"`
* `"close"`
* `"both"` - (default).
Returns a map with the key `"closed"` and a map of closed orders as the value. Additionally, the
map may contain:
* `"count"` - amount of available order info matching criteria.
The map of closed orders has the txid as the key and the value is a map with the same fields as
in open orders (see `open_orders/2`) but can contain the additional fields:
* `"closetm"` - unix timestamp of when order was closed.
* `"reason"` - additional info on status (if any).
Note: Times given by order tx ids are more accurate than unix timestamps. If an order tx id is
given for the time, the order's open time is used.
## Example response:
{:ok,
%{
"closed" => %{
"O5KKP6-NXBOJ-KPXCTA" => %{
"closetm" => 1516182880.603,
"cost" => "57.0",
"descr" => %{
"close" => "",
"leverage" => "none",
"order" => "buy 0.00670000 XBTEUR @ market",
"ordertype" => "market",
"pair" => "XBTEUR",
"price" => "0",
"price2" => "0",
"type" => "buy"
},
"expiretm" => 0,
"fee" => "0.00000",
"limitprice" => "0.00000",
"misc" => "",
"oflags" => "fciq",
"opentm" => 1516182880.5874,
"price" => "8510.4",
"reason" => nil,
"refid" => nil,
"starttm" => 0,
"status" => "closed",
"stopprice" => "0.00000",
"userref" => 0,
"vol" => "0.00670000",
"vol_exec" => "0.00670000"
}
}
}}
"""
@spec closed_orders(Client.t(), keyword) :: Krakex.API.response()
def closed_orders(client \\ @api.private_client(), opts \\ [])
def closed_orders(%Client{} = client, opts) when is_list(opts) do
@api.private_request(client, "ClosedOrders", opts)
end
def closed_orders(opts, []) do
@api.private_request(@api.private_client(), "ClosedOrders", opts)
end
@doc """
Query orders info.
Takes a list of (maximum 20) tx_ids to query info about and the following keyword options:
* `:trades` - whether or not to include trades in output. `false` (default)
* `:userref` - restrict results to given user reference id.
Returns a map with the txid as the key and the value is a map with the fields as described in
`open_orders/2`.
"""
@spec query_orders(Client.t(), [binary], keyword) :: Krakex.API.response()
def query_orders(client \\ @api.private_client(), tx_ids, opts \\ [])
def query_orders(%Client{} = client, tx_ids, opts) when is_list(opts) do
@api.private_request(client, "QueryOrders", [txid: tx_ids] ++ opts)
end
def query_orders(tx_ids, opts, []) do
@api.private_request(@api.private_client(), "QueryOrders", [txid: tx_ids] ++ opts)
end
@doc """
Get trades history.
Takes an offset and the following keyword options:
* `:type` - type of trade:
* `"all"` - all types. (default)
* `"any position"` - any position (open or closed).
* `"closed position"` - positions that have been closed.
* `"closing position"` - any trade closing all or part of a position.
* `"no position"` - non-positional trades.
* `:trades` - whether or not to include trades related to position in output. (default: `false`)
* `:start` - starting unix timestamp or trade tx id of results. (exclusive)
* `:end` - ending unix timestamp or trade tx id of results. (inclusive)
Returns a map with the fields `"trades"` and `"count"`. The map of trades has the txid as the key
and the value is a map with fields:
* `"ordertxid"` - order responsible for execution of trade.
* `"pair"` - asset pair.
* `"time"` - unix timestamp of trade.
* `"type"` - type of order (buy/sell).
* `"ordertype"` - order type.
* `"price"` - average price order was executed at (quote currency).
* `"cost"` - total cost of order (quote currency).
* `"fee"` - total fee (quote currency).
* `"vol"` - volume (base currency).
* `"margin"` - initial margin (quote currency).
* `"misc"` - comma delimited list of miscellaneous info:
* `"closing"` - trade closes all or part of a position.
If the trade opened a position, the following fields are also present in the trade info:
* `"posstatus"` - position status (open/closed).
* `"cprice"` - average price of closed portion of position (quote currency).
* `"ccost"` - total cost of closed portion of position (quote currency).
* `"cfee"` - total fee of closed portion of position (quote currency).
* `"cvol"` - total fee of closed portion of position (quote currency).
* `"cmargin"` - total margin freed in closed portion of position (quote currency).
* `"net"` - net profit/loss of closed portion of position (quote currency, quote currency scale).
* `"trades"` - list of closing trades for position (if available).
Note:
* Unless otherwise stated, costs, fees, prices, and volumes are in the asset pair's scale, not
the currency's scale.
* Times given by trade tx ids are more accurate than unix timestamps.
## Example response:
{:ok,
%{
"count" => 82,
"trades" => %{
"TECAE6-7ZWNZ-WICHNR" => %{
"cost" => "5.11000",
"fee" => "0.00818",
"margin" => "0.00000",
"misc" => "",
"ordertxid" => "OAOO5O-RAUU2-BCKZIH",
"ordertype" => "limit",
"pair" => "XXBTZEUR",
"price" => "365.00000",
"time" => 1457183489.6049,
"type" => "buy",
"vol" => "0.01400000"
}
}
}}
"""
@spec trades_history(Client.t(), integer, keyword) :: Krakex.API.response()
def trades_history(client \\ @api.private_client(), offset, opts \\ [])
def trades_history(%Client{} = client, offset, opts) do
@api.private_request(client, "TradesHistory", [ofs: offset] ++ opts)
end
def trades_history(offset, opts, []) do
@api.private_request(@api.private_client(), "TradesHistory", [ofs: offset] ++ opts)
end
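# Illustrative paging sketch (offset values are placeholders; the offset is
# simply forwarded as Kraken's `ofs` parameter):
#
#     {:ok, %{"count" => _, "trades" => _}} = Krakex.trades_history(0)
#     {:ok, _next_page} = Krakex.trades_history(50, type: "no position")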
@doc """
Query trades info.
Takes a list of (maximum 20) tx_ids and the following keyword options:
* `:trades` - whether or not to include trades related to position in output. (default: `false`)
Returns a map with the same fields as described in `trades_history/2`.
"""
@spec query_trades(Client.t(), [binary], keyword) :: Krakex.API.response()
def query_trades(client \\ @api.private_client(), tx_ids, opts \\ [])
def query_trades(%Client{} = client, tx_ids, opts) when is_list(opts) do
@api.private_request(client, "QueryTrades", [txid: tx_ids] ++ opts)
end
def query_trades(tx_ids, opts, []) do
@api.private_request(@api.private_client(), "QueryTrades", [txid: tx_ids] ++ opts)
end
@doc """
Get open positions.
Takes a list of tx_ids to restrict output to and the following keyword options:
* `:docalcs` - whether or not to include profit/loss calculations. (default: `false`)
Returns a map with the txid as the key and the value is a map with fields:
* `"ordertxid"` - order responsible for execution of trade.
* `"pair"` - asset pair.
* `"time"` - unix timestamp of trade.
* `"type"` - type of order used to open position (buy/sell).
* `"ordertype"` - order type used to open position.
* `"cost"` - opening cost of position (quote currency unless viqc set in `"oflags"`).
* `"fee"` - opening fee of position (quote currency).
* `"vol"` - position volume (base currency unless viqc set in `"oflags"`).
* `"vol_closed"` - position volume closed (base currency unless viqc set in `"oflags"`).
* `"margin"` - initial margin (quote currency).
* `"value"` - current value of remaining position (if docalcs requested. quote currency).
* `"net"` - unrealized profit/loss of remaining position (if docalcs requested. quote currency,
quote currency scale).
* `"misc"` - comma delimited list of miscellaneous info.
* `"oflags"` - comma delimited list of order flags:
* `"viqc"` - volume in quote currency.
Note: Unless otherwise stated, costs, fees, prices, and volumes are in the asset pair's scale,
not the currency's scale.
"""
@spec open_positions(Client.t(), [binary], keyword) :: Krakex.API.response()
def open_positions(client \\ @api.private_client(), tx_ids, opts \\ [])
def open_positions(%Client{} = client, tx_ids, opts) when is_list(opts) do
@api.private_request(client, "OpenPositions", [txid: tx_ids] ++ opts)
end
def open_positions(tx_ids, opts, []) do
@api.private_request(@api.private_client(), "OpenPositions", [txid: tx_ids] ++ opts)
end
@doc """
Get ledgers info.
Takes an offset and the following keyword options:
* `:aclass` - asset class. `"currency"` (default)
* `:asset` - list of assets to restrict output to. `"all"` (default)
* `:type` - type of ledger to retrieve:
* `"all"` - default.
* `"deposit"`
* `"withdrawal"`
* `"trade"`
* `"margin"`
* `:start` - starting unix timestamp or ledger id of results. (exclusive)
* `:end` - ending unix timestamp or ledger id of results. (inclusive)
Returns a map with the ledger id as the key and the value is a map with fields:
* `"refid"` - reference id.
* `"time"` - unix timestamp of ledger.
* `"type"` - type of ledger entry.
* `"aclass"` - asset class.
* `"asset"` - asset.
* `"amount"` - transaction amount.
* `"fee"` - transaction fee.
* `"balance"` - resulting balance.
Note: Times given by ledger ids are more accurate than unix timestamps.
"""
@spec ledgers(Client.t(), integer, keyword) :: Krakex.API.response()
def ledgers(client \\ @api.private_client(), offset, opts \\ [])
def ledgers(%Client{} = client, offset, opts) do
@api.private_request(client, "Ledgers", [ofs: offset] ++ opts)
end
def ledgers(offset, opts, []) do
@api.private_request(@api.private_client(), "Ledgers", [ofs: offset] ++ opts)
end
@doc """
Query ledgers.
Takes a list of (maximum 20) ledger ids to query info about.
Returns a map with the ledger id as the key and the value is a map with fields as described
in `ledgers/3`.
"""
@spec query_ledgers(Client.t(), [binary]) :: Krakex.API.response()
def query_ledgers(client \\ @api.private_client(), ledger_ids) do
@api.private_request(client, "QueryLedgers", id: ledger_ids)
end
@doc """
Get trade volume.
Takes the following keyword options:
* `:pair` - list of asset pairs to get fee info on.
* `:"fee-info"` - whether or not to include fee info in results.
Returns a map with the following fields:
* `"currency"` - volume currency.
* `"volume"` - current discount volume.
* `"fees"` - map of asset pairs and fee tier info (if requested):
* `"fee"` - current fee in percent.
* `"minfee"` - minimum fee for pair (if not fixed fee).
* `"maxfee"` - maximum fee for pair (if not fixed fee).
* `"nextfee"` - next tier's fee for pair (if not fixed fee. `nil` if at lowest fee tier).
* `"nextvolume"` - volume level of next tier (if not fixed fee. `nil` if at lowest fee tier).
* `"tiervolume"` - volume level of current tier (if not fixed fee. `nil` if at lowest fee tier).
* `"fees_maker"` - map of asset pairs and maker fee tier info (if requested) for any pairs on maker/taker schedule:
* `"fee"` - current fee in percent.
* `"minfee"` - minimum fee for pair (if not fixed fee).
* `"maxfee"` - maximum fee for pair (if not fixed fee).
* `"nextfee"` - next tier's fee for pair (if not fixed fee. `nil` if at lowest fee tier).
* `"nextvolume"` - volume level of next tier (if not fixed fee. `nil` if at lowest fee tier).
* `"tiervolume"` - volume level of current tier (if not fixed fee. `nil` if at lowest fee tier).
Note: If an asset pair is on a maker/taker fee schedule, the taker side is given in `"fees"` and
maker side in `"fees_maker"`. For pairs not on maker/taker, they will only be given in `"fees"`.
"""
@spec trade_volume(Client.t(), keyword) :: Krakex.API.response()
def trade_volume(client \\ @api.private_client(), opts \\ [])
def trade_volume(%Client{} = client, opts) when is_list(opts) do
@api.private_request(client, "TradeVolume", opts)
end
def trade_volume(opts, []) do
@api.private_request(@api.private_client(), "TradeVolume", opts)
end
@doc """
Add standard order.
Takes the following arguments:
* asset pair
* type - `"buy"` or `"sell"`.
* ordertype - one of the following:
* `"market"`
* `"limit"` - (price = limit price).
* `"stop-loss"` - (price = stop loss price).
* `"take-profit"` - (price = take profit price).
* `"stop-loss-profit"` - (price = stop loss price, price2 = take profit price).
* `"stop-loss-profit-limit"` - (price = stop loss price, price2 = take profit price).
* `"stop-loss-limit"` - (price = stop loss trigger price, price2 = triggered limit price).
* `"take-profit-limit"` - (price = take profit trigger price, price2 = triggered limit price).
* `"trailing-stop"` - (price = trailing stop offset).
* `"trailing-stop-limit"` - (price = trailing stop offset, price2 = triggered limit offset).
* `"stop-loss-and-limit"` - (price = stop loss price, price2 = limit price).
* `"settle-position"`
* volume - order volume in lots.
and the following keyword options:
* `:price` - price (dependent upon ordertype).
* `:price2` - secondary price (dependent upon ordertype).
* `:leverage` - amount of leverage desired (default = none).
* `:oflags` - list of order flags:
* `:viqc` - volume in quote currency (not available for leveraged orders).
* `:fcib` - prefer fee in base currency.
* `:fciq` - prefer fee in quote currency.
* `:nompp` - no market price protection.
* `:post` - post only order (available when ordertype = limit).
* `:starttm` - scheduled start time:
* `0` - now (default).
* `+<n>` - schedule start time <n> seconds from now.
* `<n>` - unix timestamp of start time.
* `:expiretm` - expiration time:
* `0` - no expiration (default).
* `+<n>` - expire <n> seconds from now.
* `<n>` - unix timestamp of expiration time.
* `:userref` - user reference id. 32-bit signed number.
* `:validate` - validate inputs only (does not submit order).
Returns a map with the following fields:
* `"descr"` - order description info.
* `"order"` - order description.
* `"close"` - onditional close order description (if conditional close set).
* `"txid"` - array of transaction ids for order (if order was added successfully).
Note:
* See `asset_pairs/2` for specifications on asset pair prices, lots, and leverage.
* Prices can be preceded by `+`, `-`, or `#` to signify the price as a relative amount (with
the exception of trailing stops, which are always relative). `+` adds the amount to the
current offered price. `-` subtracts the amount from the current offered price. `#` will
either add or subtract the amount to the current offered price, depending on the type and
order type used. Relative prices can be suffixed with a `%` to signify the relative amount
as a percentage of the offered price.
* For orders using leverage, 0 can be used for the volume to auto-fill the volume needed to
close out your position.
* If you receive the error `"EOrder:Trading agreement required"`, refer to your API key
management page for further details.
## Example response:
{:ok,
%{
"descr" => %{"order" => "sell 100.00000000 XRPEUR @ limit 1.50000"},
"txid" => ["OL63HZ-UFU23-CKEBRA"]
}}
"""
@spec add_order(Client.t(), binary, binary, binary, number, keyword) :: Krakex.API.response()
def add_order(client \\ @api.private_client(), pair, type, order_type, volume, opts \\ [])
def add_order(%Client{} = client, pair, type, order_type, volume, opts) when is_list(opts) do
opts = [pair: pair, type: type, ordertype: order_type, volume: volume] ++ opts
@api.private_request(client, "AddOrder", opts)
end
def add_order(pair, type, order_type, volume, opts, []) do
opts = [pair: pair, type: type, ordertype: order_type, volume: volume] ++ opts
@api.private_request(@api.private_client(), "AddOrder", opts)
end
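# Illustrative usage mirroring the example response above (values are
# placeholders; pass `validate: true` to check inputs without placing an order):
#
#     Krakex.add_order("XRPEUR", "sell", "limit", 100, price: 1.5, validate: true)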
@doc """
Cancel open order.
Takes a tx_id for the order to cancel.
Returns a map with the following fields:
* `"count"` - number of orders canceled.
* `"pending"` - if set, order(s) is/are pending cancellation.
Note: tx_id may be a user reference id.
## Example response:
{:ok, %{"count" => 1}}
"""
@spec cancel_order(Client.t(), binary) :: Krakex.API.response()
def cancel_order(client \\ @api.private_client(), tx_id) do
@api.private_request(client, "CancelOrder", txid: tx_id)
end
@doc """
Get deposit methods.
Takes an asset and the following keyword options:
* `:aclass` - asset class. `"currency"` (default)
Returns a list of maps with the following fields:
* `"method"` - name of deposit method.
* `"limit"` - maximum net amount that can be deposited right now, or `false` if no limit.
* `"fee"` - amount of fees that will be paid.
* `"address-setup-fee"` - whether or not method has an address setup fee (optional).
## Example response:
{:ok, [%{"fee" => "5.00", "limit" => "25000.00", "method" => "SynapsePay (US Wire)"}]}
"""
@spec deposit_methods(Client.t(), binary, keyword) :: Krakex.API.response()
def deposit_methods(client \\ @api.private_client(), asset, opts \\ [])
def deposit_methods(%Client{} = client, asset, opts) when is_list(opts) do
@api.private_request(client, "DepositMethods", [asset: asset] ++ opts)
end
def deposit_methods(asset, opts, []) do
@api.private_request(@api.private_client(), "DepositMethods", [asset: asset] ++ opts)
end
@doc """
Get deposit addresses.
Takes an asset, a deposit method, and the following keyword options:
* `:aclass` - asset class. `"currency"` (default)
* `:new` - whether or not to generate a new address. (default: `false`)
Returns a list of maps with the following fields:
* `"address"` - deposit address.
* `"expiretm"` - expiration time in unix timestamp, or `0` if not expiring.
* `"new"` - whether or not address has ever been used.
## Example response:
{:ok,
[
%{
"address" => "38mKXaQiKBZn549tx41igEdLPPYMVeD34h",
"expiretm" => "0",
"new" => true
}
]}
"""
@spec deposit_addresses(Client.t(), binary, binary, keyword) :: Krakex.API.response()
def deposit_addresses(client \\ @api.private_client(), asset, method, opts \\ [])
def deposit_addresses(%Client{} = client, asset, method, opts) when is_list(opts) do
@api.private_request(client, "DepositAddresses", [asset: asset, method: method] ++ opts)
end
def deposit_addresses(asset, method, opts, []) do
opts = [asset: asset, method: method] ++ opts
@api.private_request(@api.private_client(), "DepositAddresses", opts)
end
@doc """
Get status of recent deposits.
Takes an asset, a deposit method, and the following keyword options:
* `:aclass` - asset class. `"currency"` (default)
Returns a list of maps with the following fields:
* `"method"` - name of the deposit method used.
* `"aclass"` - asset class.
* `"asset"` - asset X-ISO4217-A3 code.
* `"refid"` - reference id.
* `"txid"` - method transaction id.
* `"info"` - method transaction information.
* `"amount"` - amount deposited.
* `"fee"` - fees paid.
* `"time"` - unix timestamp when request was made.
* `"status"` - status of deposit.
* `"status-prop"` - additional status properties (if available):
* `"return"` - a return transaction initiated by Kraken.
* `"onhold"` - deposit is on hold pending review.
"""
@spec deposit_status(Client.t(), binary, binary, keyword) :: Krakex.API.response()
def deposit_status(client \\ @api.private_client(), asset, method, opts \\ [])
def deposit_status(%Client{} = client, asset, method, opts) when is_list(opts) do
@api.private_request(client, "DepositStatus", [asset: asset, method: method] ++ opts)
end
def deposit_status(asset, method, opts, []) do
opts = [asset: asset, method: method] ++ opts
@api.private_request(@api.private_client(), "DepositStatus", opts)
end
@doc """
Get withdrawal information.
Takes an asset, the withdrawal key name as set up in your account, an amount to withdraw and the
following keyword options:
* `:aclass` - asset class. `"currency"` (default)
Returns a map with the following fields:
* `"method"` - name of the withdrawal method that will be used.
* `"limit"` - maximum net amount that can be withdrawn right now.
* `"fee"` - amount of fees that will be paid.
## Example response:
{:ok,
%{
"amount" => "0.10670000",
"fee" => "0.00100000",
"limit" => "0.10770000",
"method" => "Bitcoin"
}}
"""
@spec withdraw_info(Client.t(), binary, binary, binary, keyword) :: Krakex.API.response()
def withdraw_info(client \\ @api.private_client(), asset, key, amount, opts \\ [])
def withdraw_info(%Client{} = client, asset, key, amount, opts) when is_list(opts) do
opts = [asset: asset, key: key, amount: amount] ++ opts
@api.private_request(client, "WithdrawInfo", opts)
end
def withdraw_info(asset, key, amount, opts, []) do
opts = [asset: asset, key: key, amount: amount] ++ opts
@api.private_request(@api.private_client(), "WithdrawInfo", opts)
end
end
defmodule FarmbotFirmware.GCODE.Encoder do
@moduledoc false
alias FarmbotFirmware.{GCODE, Param}
@doc false
@spec do_encode(GCODE.kind(), GCODE.args()) :: binary()
def do_encode(:report_idle, []), do: "R00"
def do_encode(:report_begin, []), do: "R01"
def do_encode(:report_success, []), do: "R02"
def do_encode(:report_error, []), do: "R03"
def do_encode(:report_error, error), do: "R03 " <> encode_error(error)
def do_encode(:report_busy, []), do: "R04"
def do_encode(:report_axis_state, xyz), do: "R05 " <> encode_axis_state(xyz)
def do_encode(:report_calibration_state, xyz),
do: "R06 " <> encode_calibration_state(xyz)
def do_encode(:report_retry, []), do: "R07"
def do_encode(:report_echo, [echo]), do: "R08 * #{echo} *"
def do_encode(:report_invalid, []), do: "R09"
def do_encode(:report_home_complete, [:x]), do: "R11"
def do_encode(:report_home_complete, [:y]), do: "R12"
def do_encode(:report_home_complete, [:z]), do: "R13"
def do_encode(:report_position_change, [x: _] = arg),
do: "R15 " <> encode_floats(arg)
def do_encode(:report_position_change, [y: _] = arg),
do: "R16 " <> encode_floats(arg)
def do_encode(:report_position_change, [z: _] = arg),
do: "R17 " <> encode_floats(arg)
def do_encode(:report_parameters_complete, []), do: "R20"
def do_encode(:report_parameter_value, pv), do: "R21 " <> encode_pv(pv)
def do_encode(:report_calibration_parameter_value, pv),
do: "R23 " <> encode_pv(pv)
def do_encode(:report_pin_value, pv), do: "R41 " <> encode_ints(pv)
def do_encode(:report_axis_timeout, [:x]), do: "R71"
def do_encode(:report_axis_timeout, [:y]), do: "R72"
def do_encode(:report_axis_timeout, [:z]), do: "R73"
def do_encode(:report_end_stops, xxyyzz),
do: "R81 " <> encode_end_stops(xxyyzz)
def do_encode(:report_position, xyzs), do: "R82 " <> encode_floats(xyzs)
def do_encode(:report_software_version, [version]), do: "R83 " <> version
def do_encode(:report_encoders_scaled, xyz), do: "R84 " <> encode_floats(xyz)
def do_encode(:report_encoders_raw, xyz), do: "R85 " <> encode_floats(xyz)
def do_encode(:report_emergency_lock, []), do: "R87"
def do_encode(:report_no_config, []), do: "R88"
def do_encode(:report_load, uxvywz), do: "R89 " <> encode_uxvywz(uxvywz)
def do_encode(:report_debug_message, [message]), do: "R99 " <> message
def do_encode(:command_movement, xyzs), do: "G00 " <> encode_floats(xyzs)
def do_encode(:command_movement_home, [:x, :y, :z]), do: "G28"
def do_encode(:command_movement_home, [:x]),
do: "G00 " <> encode_floats(x: 0.0)
def do_encode(:command_movement_home, [:y]),
do: "G00 " <> encode_floats(y: 0.0)
def do_encode(:command_movement_home, [:z]),
do: "G00 " <> encode_floats(z: 0.0)
def do_encode(:command_movement_find_home, [:x]), do: "F11"
def do_encode(:command_movement_find_home, [:y]), do: "F12"
def do_encode(:command_movement_find_home, [:z]), do: "F13"
def do_encode(:command_movement_calibrate, [:x]), do: "F14"
def do_encode(:command_movement_calibrate, [:y]), do: "F15"
def do_encode(:command_movement_calibrate, [:z]), do: "F16"
def do_encode(:parameter_read_all, []), do: "F20"
def do_encode(:parameter_read, [parameter]),
do: "F21 P#{Param.encode(parameter)}"
def do_encode(:parameter_write, pv), do: "F22 " <> encode_pv(pv)
def do_encode(:calibration_parameter_write, pv), do: "F23 " <> encode_pv(pv)
def do_encode(:pin_write, pv), do: "F41 " <> encode_ints(pv)
def do_encode(:pin_read, p), do: "F42 " <> encode_ints(p)
def do_encode(:pin_mode_write, pm), do: "F43 " <> encode_ints(pm)
def do_encode(:servo_write, pv), do: "F61 " <> encode_ints(pv)
def do_encode(:end_stops_read, []), do: "F81"
def do_encode(:position_read, []), do: "F82"
def do_encode(:software_version_read, []), do: "F83"
def do_encode(:position_write_zero, [:x, :y, :z]), do: "F84 X1 Y1 Z1"
def do_encode(:position_write_zero, [:x]), do: "F84 X1"
def do_encode(:position_write_zero, [:y]), do: "F84 Y1"
def do_encode(:position_write_zero, [:z]), do: "F84 Z1"
def do_encode(:command_emergency_unlock, _), do: "F09"
def do_encode(:command_emergency_lock, _), do: "E"
def do_encode(_kind, _args), do: "R03"
@spec encode_floats([{Param.t(), float()}]) :: binary()
defp encode_floats(args) do
Enum.map(args, fn {param, value} ->
binary_float = :erlang.float_to_binary(value, decimals: 2)
String.upcase(to_string(param)) <> binary_float
end)
|> Enum.join(" ")
end
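# For example, combining the :command_movement clause above with this helper
# (values illustrative):
#
#     do_encode(:command_movement, x: 1.0, y: 2.0, z: 3.0)
#     #=> "G00 X1.00 Y2.00 Z3.00"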
defp encode_axis_state([{axis, :idle}]),
do: String.upcase(to_string(axis)) <> "0"
defp encode_axis_state([{axis, :begin}]),
do: String.upcase(to_string(axis)) <> "1"
defp encode_axis_state([{axis, :accelerate}]),
do: String.upcase(to_string(axis)) <> "2"
defp encode_axis_state([{axis, :cruise}]),
do: String.upcase(to_string(axis)) <> "3"
defp encode_axis_state([{axis, :decelerate}]),
do: String.upcase(to_string(axis)) <> "4"
defp encode_axis_state([{axis, :stop}]),
do: String.upcase(to_string(axis)) <> "5"
defp encode_axis_state([{axis, :crawl}]),
do: String.upcase(to_string(axis)) <> "6"
defp encode_calibration_state([{axis, :idle}]),
do: String.upcase(to_string(axis)) <> "0"
defp encode_calibration_state([{axis, :home}]),
do: String.upcase(to_string(axis)) <> "1"
defp encode_calibration_state([{axis, :end}]),
do: String.upcase(to_string(axis)) <> "2"
defp encode_end_stops(xa: xa, xb: xb, ya: ya, yb: yb, za: za, zb: zb) do
"XA#{xa} XB#{xb} YA#{ya} YB#{yb} ZA#{za} ZB#{zb}"
end
defp encode_pv([{param, value}]) do
param_id = Param.encode(param)
binary_float = :erlang.float_to_binary(value, decimals: 2)
"P#{param_id} V#{binary_float}"
end
def encode_uxvywz([u_value, x_value, v_value, y_value, w_value, z_value]) do
u_int = to_string(u_value)
x_int = to_string(x_value)
v_int = to_string(v_value)
y_int = to_string(y_value)
w_int = to_string(w_value)
z_int = to_string(z_value)
"U#{u_int} X#{x_int} V#{v_int} Y#{y_int} W#{w_int} Z#{z_int}"
end
defp encode_error(error) do
case error do
:no_error -> "V0"
:emergency_lock -> "V1"
:timeout -> "V2"
:stall_detected -> "V3"
:calibration_error -> "V4"
:invalid_command -> "V14"
:no_config -> "V15"
:stall_detected_x -> "V31"
:stall_detected_y -> "V32"
:stall_detected_z -> "V33"
_ -> ""
end
end
defp encode_ints(args) do
Enum.map(args, fn {key, val} ->
String.upcase(to_string(key)) <> to_string(val)
end)
|> Enum.join(" ")
end
end
defmodule HedwigTrivia do
@moduledoc """
A GenServer to hold the state and provide a general API to the game.
"""
use GenServer
require Logger
alias HedwigTrivia.{
GameState,
Logic
}
@name __MODULE__
@error_fetching_question "There was an error fetching the question"
@error_fetching_answer "I've lost track of the question. Please request another question"
@doc false
@spec start_link(map()) :: :ignore | {:error, any()} | {:ok, pid()}
def start_link(config \\ %{}) do
GenServer.start_link(@name, config, name: @name)
end
@doc """
Debug the current game state.
"""
@spec state() :: GameState.t()
def state do
GenServer.call(@name, :state)
end
@doc """
Fetch a random question.
"""
@spec question(boolean()) :: {atom(), String.t()}
def question(new \\ false) do
GenServer.call(@name, {:question, new})
end
@doc """
Return the actual answer to the question. For use when the user has given up.
"""
@spec solution() :: {atom(), String.t()}
def solution do
GenServer.call(@name, :solution)
end
@doc """
Check the user's guess against the right answer.
"""
@spec guess(String.t()) :: {atom(), String.t()}
def guess(guess) do
GenServer.call(@name, {:guess, guess})
end
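# Illustrative round trip through the public API (a sketch — the actual
# question and answer strings come from the configured trivia source):
#
#     {:ok, _question} = HedwigTrivia.question()
#     {:error, _wrong} = HedwigTrivia.guess("not the answer")
#     {:ok, _answer} = HedwigTrivia.solution()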
@impl true
@spec init(map()) :: {:ok, GameState.t()}
def init(options) do
if options[:debug] do
Logger.info("trivia.start " <> inspect(options))
end
{:ok, GameState.new(options)}
end
@impl true
@spec handle_call(:state, any(), GameState.t()) ::
{:reply, GameState.t(), GameState.t()}
def handle_call(:state, _from, state) do
{:reply, state, state}
end
@impl true
@spec handle_call({:question, boolean()}, any(), GameState.t()) ::
{:reply, {atom(), String.t()}, GameState.t()}
def handle_call({:question, new}, _from, state) do
maybe_debug("trivia.question new:#{new}", state)
{atom, state, response} = fetch_question(state, new)
{:reply, {atom, response}, state}
end
@impl true
@spec handle_call(:solution, any(), GameState.t()) ::
{:reply, {atom(), String.t()}, GameState.t()}
def handle_call(:solution, _from, state) do
maybe_debug("trivia.solution", state)
{atom, state, response} =
case state.answer do
"" ->
# The bot has lost track of or never been asked a question, go get one
{_, state, _} = fetch_question(state, true)
{:error, state, @error_fetching_answer}
_ ->
state = %{state | answered: true}
{:ok, state, state.answer}
end
{:reply, {atom, response}, state}
end
@impl true
@spec handle_call({:guess, String.t()}, any(), GameState.t()) ::
{:reply, {atom(), String.t()}, GameState.t()}
def handle_call({:guess, guess}, _from, state) do
maybe_debug("trivia.guess guess: #{guess}", state)
{atom, state, response} =
case Logic.guess(state, guess) do
{:error, _} ->
{:error, state, Logic.incorrect(guess)}
{:ok, state} ->
{:ok, state, Logic.correct(guess)}
end
{:reply, {atom, response}, state}
end
defp maybe_debug(msg, state) do
if state.debug, do: Logger.info(msg <> " " <> inspect(state))
end
defp fetch_question(state, new) do
case Logic.question(state, new) do
{:error, state} -> {:error, state, @error_fetching_question}
{atom, state} -> {atom, state, Logic.compose_full_question(state)}
end
end
end
defmodule RDF.Turtle.Star.CompactGraph do
@moduledoc !"""
A compact graph representation in which annotations are directly stored under
the objects of the annotated triples.
This representation is not meant for direct use, but just for the `RDF.Turtle.Encoder`.
"""
alias RDF.{Graph, Description}
def compact(graph) do
Enum.reduce(graph.descriptions, graph, fn
{{_, _, _} = quoted_triple, _}, compact_graph ->
# First check the original graph to see if the quoted triple is asserted.
if Graph.include?(graph, quoted_triple) do
annotation =
compact_graph
# We'll have to re-fetch the description, since the compact_graph might already contain
# an updated description with an annotation.
|> Graph.description(quoted_triple)
|> as_annotation()
compact_graph
|> add_annotation(quoted_triple, annotation)
|> Graph.delete_descriptions(quoted_triple)
else
compact_graph
end
_, compact_graph ->
compact_graph
end)
end
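# Sketch of the transformation (terms hypothetical): given an asserted triple
# `{s, p, o}` and an annotation on the quoted triple `<< s p o >>`, the
# annotation's description is re-attached directly under the object `o` of the
# asserted triple, and the standalone quoted-triple description is dropped.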
defp add_annotation(compact_graph, {{_, _, _} = quoted_triple, p, o} = triple, annotation) do
# Check if the compact graph still contains the annotated triple, we want to put the annotation under.
if Graph.describes?(compact_graph, quoted_triple) do
do_add_annotation(compact_graph, triple, annotation)
else
# It's not there anymore, which means the description of the quoted triple was already moved as an annotation.
# Next we have to search recursively for the annotation, we want to put the nested annotation under.
path = find_annotation_path(compact_graph, quoted_triple, [p, o])
do_add_annotation(compact_graph, path, annotation)
end
end
defp add_annotation(compact_graph, triple, annotation) do
do_add_annotation(compact_graph, triple, annotation)
end
defp do_add_annotation(compact_graph, {s, p, o}, annotation) do
update_in(compact_graph, [s], &put_in(&1.predications[p][o], annotation))
end
defp do_add_annotation(compact_graph, [s | path], annotation) do
update_in(compact_graph, [s], &update_annotation_in(&1, path, annotation))
end
defp update_annotation_in(_, [], annotation), do: annotation
defp update_annotation_in(description, [p, o | rest], annotation) do
%Description{
description
| predications:
update_in(description.predications, [p, o], &update_annotation_in(&1, rest, annotation))
}
end
defp find_annotation_path(compact_graph, {s, p, o}, path) do
cond do
Graph.describes?(compact_graph, s) -> [s, p, o | path]
match?({_, _, _}, s) -> find_annotation_path(compact_graph, s, [p, o | path])
end
end
defp as_annotation(description), do: %{description | subject: nil}
end
defmodule Jerry.Utils.ListUtils do
@moduledoc false
@doc false
def nest_children([], _pred), do: []
def nest_children([x|xs], pred) do
{children, unrelated} = split_children(x, xs, pred)
[children | nest_children(unrelated, pred)]
end
# Given a parent, a list of entries, and a predicate which evaluates two entries to true iff
# the second argument is the immediate successor of the first argument. Returns a tuple
# {{parent, successors}, rest}. rest contains all entries which are not a successor
# (immediate or indirect) of the given parent. `successors` is a list structured as
# follows: each entry is a tuple {item, successors} of type f, where item is an entry
# and successors is again a list of tuples, each of type f.
# The following precondition must be fulfilled: each child must occur after its parent in `entries`.
# TODO use type annotations, otherwise this function is difficult to grok.
@doc false
def split_children(parent, entries, pred) do
# Each entry must be unique, otherwise the MapSet will not work as supposed to.
unique_entries = Enum.with_index(entries)
modified_pred = fn {entry1, _idx1}, {entry2, _idx2} ->
pred.(entry1, entry2)
end
{{{^parent, -1}, children}, used} = successors({parent, -1}, unique_entries, modified_pred, MapSet.new)
rest = unique_entries
|> Enum.filter(&(!MapSet.member?(used, &1)))
|> Enum.map(fn {entry, _idx} -> entry end)
{{parent, Enum.map(children, &without_indices/1)}, rest}
end
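# Illustrative use of `nest_children/2` (entries and predicate are made up;
# note the precondition that children appear after their parents):
#
#     entries = [%{id: 1, parent: nil}, %{id: 2, parent: 1}, %{id: 3, parent: 2}]
#     pred = fn a, b -> b.parent == a.id end
#     nest_children(entries, pred)
#     #=> [{%{id: 1, parent: nil}, [{%{id: 2, parent: 1}, [{%{id: 3, parent: 2}, []}]}]}]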
@doc false
def without_indices({{x, idx}, descendants}) when is_integer(idx) do
{x, Enum.map(descendants, &without_indices/1)}
end
@doc false
def successors(parent, [], _pred, m = %MapSet{}), do: {{parent, []}, m}
def successors(parent, entries = [_|xs], pred, m = %MapSet{}) do
immediate_succs = Enum.filter(entries, fn entry ->
pred.(parent, entry)
end)
descendants = Enum.map(immediate_succs, fn child ->
successors(child, xs, pred, MapSet.put(m, child))
end)
map = Enum.reduce(descendants, m, fn ({_, map}, acc) ->
MapSet.union(map, acc)
end)
clean_descendants = Enum.map(descendants, fn {{x, y}, _map} -> {x, y} end)
{{parent, clean_descendants}, map}
end
end
defmodule HomeWeb.ApiEnergyController do
use HomeWeb, :controller
alias HomeWeb.Models.GraphModel
def gas_usage(conn, %{"group" => group, "start" => start_time, "end" => end_time}) do
validate_group(group)
validate_timestamp(start_time)
validate_timestamp(end_time)
# data = GraphModel.gas_usage_data(group, start_time, end_time)
data = %{}
json(conn, data)
end
def hourly_gas_usage(conn, _params) do
data = GraphModel.gas_usage_data(days_ago(3), now(), 1, "hour", "Hourly gas usage")
json(conn, data)
end
@spec daily_gas_usage(Plug.Conn.t(), any) :: Plug.Conn.t()
def daily_gas_usage(conn, _params) do
data = GraphModel.gas_usage_data("1d", days_ago(48), now(), "Daily gas usage")
json(conn, data)
end
def daily_gas_and_temp(conn, _params) do
data = GraphModel.daily_gas_and_temperature_data()
json(conn, data)
end
def gas_usage_per_temperature(conn, _params) do
data = GraphModel.gas_usage_per_temperature_data()
json(conn, data)
end
def gas_usage_per_temperature_per_year(conn, _params) do
data = GraphModel.gas_usage_per_temperature_per_year_data()
json(conn, data)
end
def electricity_usage(conn, %{"group" => group, "start" => start_time, "end" => end_time}) do
validate_group(group)
validate_timestamp(start_time)
validate_timestamp(end_time)
# data = GraphModel.electricity_usage_data(group, start_time, end_time)
data = %{}
json(conn, data)
end
def daily_electricity_usage(conn, _params) do
data =
GraphModel.electricity_usage_data(days_ago(48), now(), 1, "day", "Daily electricity usage")
json(conn, data)
end
def hourly_electricity_usage(conn, _params) do
data =
GraphModel.electricity_usage_data(days_ago(3), now(), 1, "hour", "Hourly electricity usage")
json(conn, data)
end
def current_electricity_usage(conn, _params) do
data = GraphModel.current_electricity_usage_data()
json(conn, data)
end
def compare_gas_usage(conn, %{
"p1start" => p1_start,
"p1end" => p1_end,
"p2start" => p2_start,
"p2end" => p2_end,
"ticks" => ticks
}) do
{p1_mean, p1_sd} = GraphModel.get_gas_mean_and_sd_of_period(p1_start, p1_end, ticks)
{p2_mean, p2_sd} = GraphModel.get_gas_mean_and_sd_of_period(p2_start, p2_end, ticks)
data = %{
title: "Gas hourly usage comparison",
labels: ["Period 1 mean", "Period 1 SD", "Period 2 mean", "Period 2 SD"],
datasets: [
%{
data: [p1_mean, p1_sd, p2_mean, p2_sd]
}
]
}
json(conn, data)
end
def compare_electricity_usage(conn, %{
"p1start" => p1_start,
"p1end" => p1_end,
"p2start" => p2_start,
"p2end" => p2_end,
"ticks" => ticks
}) do
{p1_mean, p1_sd} = GraphModel.get_electricity_mean_and_sd_of_period(p1_start, p1_end, ticks)
{p2_mean, p2_sd} = GraphModel.get_electricity_mean_and_sd_of_period(p2_start, p2_end, ticks)
data = %{
title: "Electricity hourly usage comparison",
labels: ["Period 1 mean", "Period 1 SD", "Period 2 mean", "Period 2 SD"],
datasets: [
%{
data: [p1_mean, p1_sd, p2_mean, p2_sd]
}
]
}
json(conn, data)
end
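# Note: both compare endpoints reduce each period to a {mean, standard
# deviation} pair per tick interval (the bucketing itself is delegated to
# GraphModel), so periods of different lengths can still be compared on the
# same scale.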
defp days_ago(days) do
DateTime.now!("Etc/UTC")
|> DateTime.add(-days * 24 * 60 * 60, :second)
end
defp now do
DateTime.now!("Etc/UTC")
end
defp validate_group(group) do
case Enum.member?(~w(1m 15m 1h 6h 1d 7d), group) do
true -> nil
_ -> raise "Unknown group_by"
end
end
defp validate_timestamp(timestamp) do
case DateTime.from_iso8601(timestamp) do
{:ok, _, _} -> nil
_ -> raise "Incorrect timestamp format"
end
end
end
defmodule Geohax do
@moduledoc """
Geohash encoding and decoding.
"""
use Bitwise
import Integer, only: [is_even: 1]
@type direction() :: :north | :south | :east | :west
# Standard geohash base32 alphabet.
@base32 '0123456789bcdefghjkmnpqrstuvwxyz'
@lon_range {-180, 180}
@lat_range {-90, 90}
# Standard geohash adjacency tables, one charlist per {even, odd} hash length.
@neighbors [
north: {'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx'},
south: {'14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp'},
east: {'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy'},
west: {'238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb'}
]
@borders [
north: {'prxz', 'bcfguvyz'},
south: {'028b', '0145hjnp'},
east: {'bcfguvyz', 'prxz'},
west: {'0145hjnp', '028b'}
]
# API
@doc """
Encodes a position `{longitude, latitude}` to a Geohash of `precision`
length.
## Example
iex> Geohax.encode(-132.83, -38.1033, 6)
"311x1r"
"""
@spec encode(float(), float(), pos_integer()) :: String.t()
def encode(longitude, latitude, precision \\ 12) do
bencode(longitude, latitude, precision * 5) |> to_base32()
end
@doc """
Decodes a Geohash to a position `{longitude, latitude}`.
## Example
iex> Geohax.decode("311x1r")
{-132.83, -38.1033}
"""
@spec decode(String.t()) :: {float(), float()}
def decode(geohash) do
geohash
|> to_base10()
|> to_bits()
|> bdecode()
end
@doc """
Finds neighbors of a Geohash.
## Example
iex> Geohax.neighbors("311x1r")
%{north: "311x32", south: "311x1q", east: "311x1x", west: "311x1p"}
"""
@spec neighbors(String.t()) :: %{direction() => String.t()}
def neighbors(geohash) do
%{
north: neighbor(geohash, :north),
south: neighbor(geohash, :south),
east: neighbor(geohash, :east),
west: neighbor(geohash, :west)
}
end
@doc """
Finds neighbor of a Geohash in a given direction.
Allowed directions are `:north`, `:south`, `:east` and `:west`.
## Example
iex> Geohax.neighbor("311x1r", :north)
"311x32"
"""
@spec neighbor(String.t(), direction()) :: String.t()
def neighbor(geohash, direction) do
<<last::size(8)>> = String.last(geohash)
type = rem(String.length(geohash), 2)
base = String.slice(geohash, 0..-2)
if(last in elem(@borders[direction], type), do: neighbor(base, direction), else: base) <>
<<Enum.fetch!(
@base32,
:string.str(
elem(@neighbors[direction], type),
[last]
) - 1
)::size(8)>>
end
@doc """
Finds all the Geohashes within `{min_lon, min_lat}, {max_lon, max_lat}` with the
given `precision`.
## Examples
iex> Geohax.within({16.731831, 52.291725}, {17.071703, 52.508736})
["u37ck", "u37cm", "u37cq", "u37cr", "u3k12", "u3k13", "u3k16", "u3k17", "u3k1k", "u37cs", "u37ct", "u37cw", "u37cx", "u3k18", "u3k19", "u3k1d", "u3k1e", "u3k1s", "u37cu", "u37cv", "u37cy", "u37cz", "u3k1b", "u3k1c", "u3k1f", "u3k1g", "u3k1u", "u37fh", "u37fj", "u37fn", "u37fp", "u3k40", "u3k41", "u3k44", "u3k45", "u3k4h", "u37fk", "u37fm", "u37fq", "u37fr", "u3k42", "u3k43", "u3k46", "u3k47", "u3k4k", "u37fs", "u37ft", "u37fw", "u37fx", "u3k48", "u3k49", "u3k4d", "u3k4e", "u3k4s"]
iex> Geohax.within({16.731831, 52.291725}, {17.071703, 52.508736}, 3)
["u37", "u3k"]
"""
@spec within({float(), float()}, {float(), float()}, pos_integer()) :: [String.t()]
def within({min_lon, min_lat}, {max_lon, max_lat}, precision \\ 5) do
sw = encode(min_lon, min_lat, precision)
ne = encode(max_lon, max_lat, precision)
se = encode(max_lon, min_lat, precision)
south_border(ne, se, sw)
end
# Core
## Encoding
defp bencode(lon, lat, size) do
blon = encode_partial(lon, size - 1, @lon_range)
blat = encode_partial(lat, size - 2, @lat_range)
<<blon + blat::size(size)>>
end
defp encode_partial(_value, size, _range) when size < 0, do: 0
defp encode_partial(value, size, {min, max}) do
middle = avg(min, max)
if value < middle,
do: encode_partial(value, size - 2, {min, middle}),
else: exp2(size) + encode_partial(value, size - 2, {middle, max})
end
## Decoding
defp bdecode(hash) do
{lon_bits, lat_bits} =
hash
|> Enum.with_index()
|> Enum.split_with(fn {_, i} -> is_even(i) end)
lon = decode_partial(lon_bits, @lon_range)
lat = decode_partial(lat_bits, @lat_range)
{lon, lat}
end
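# Longitude bits sit at even indices and latitude bits at odd indices, so
# decoding only needs to split the bit list by index parity before walking
# the binary-search intervals back down to coordinates.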
defp decode_partial([], {min, max}), do: avg(min, max) |> to_fixed({min, max})
defp decode_partial([{0, _} | bits], {min, max}),
do: decode_partial(bits, {min, avg(min, max)})
defp decode_partial([{1, _} | bits], {min, max}),
do: decode_partial(bits, {avg(min, max), max})
# Helpers
defp exp2(n), do: :math.pow(2, n) |> round()
defp avg(x, y), do: (x + y) / 2
defp to_base32(n),
do: for(<<i::size(5) <- n>>, do: Enum.fetch!(@base32, i)) |> to_string()
defp to_base10(""), do: []
defp to_base10(<<char::size(8)>> <> str),
do: [:string.str(@base32, [char]) - 1 | to_base10(str)]
defp to_bits([]), do: []
defp to_bits([n | tail]), do: Enum.map(4..0, &bit_at(n, &1)) ++ to_bits(tail)
defp bit_at(bits, index), do: (1 <<< index &&& bits) >>> index
# Format the given coordinate to a fixed-point notation.
defp to_fixed(coord, {min, max}) do
precision = round(Float.floor(2 - :math.log10(max - min)))
Float.round(coord, precision)
end
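# Walk west along the southern edge from the SE corner until the SW corner has
# been collected, then sweep the whole row north until the NE corner shows up.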
defp south_border(ne, se, sw, acc \\ []) do
if sw in acc,
do: north_border(ne, acc),
else: south_border(ne, neighbor(se, :west), sw, [se | acc])
end
defp north_border(ne, row, acc \\ []) do
if ne in row,
do: acc ++ row,
else: north_border(ne, Enum.map(row, &neighbor(&1, :north)), acc ++ row)
end
end
defmodule ElixirRigidPhysics.Geometry.Util do
@moduledoc """
Module to handle oddball util functions for geometry stuff.
"""
alias Graphmath.Vec3
@doc """
Function to get the closest point to a point `p` on a line segment spanning points `a` and `b`.
Excellent derivation [here](https://math.stackexchange.com/a/2193733).
## Examples
iex> # p coincident with a
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 0.0, 0.0}
iex> # p coincident with b
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {0.0, 1.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 1.0, 0.0}
iex> # p midway between a and b
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {0.0, 0.5, 0.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 0.5, 0.0}
iex> # p closer to a
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {0.0, -0.5, 0.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 0.0, 0.0}
iex> # p closer to b
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {0.0, 2.5, 0.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 1.0, 0.0}
iex> # p far away from midpoint
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {1000.0, 0.5, 0.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 0.5, 0.0}
iex> ElixirRigidPhysics.Geometry.Util.closest_point_on_line_to_point( {0.0, 0.5, 10000.0}, {0.0, 0.0, 0.0}, {0.0, 1.0, 0.0})
{0.0, 0.5, 0.0}
"""
@spec closest_point_on_line_to_point(Vec3.vec3(), Vec3.vec3(), Vec3.vec3()) :: Vec3.vec3()
def closest_point_on_line_to_point(p, a, b) do
v = Vec3.subtract(b, a)
u = Vec3.subtract(a, p)
dvu = Vec3.dot(v, u)
t = -(dvu / Vec3.dot(v, v))
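# Minimizing g(t) = |u + t*v|^2 gives t = -(v . u) / (v . v); a t in [0, 1]
# lies on the segment, otherwise the g(0)/g(1) comparison below clamps to the
# nearer endpoint.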
if t >= 0.0 and t <= 1.0 do
Vec3.add(Vec3.scale(a, 1 - t), Vec3.scale(b, t))
else
g0 = Vec3.length_squared(u)
g1 = Vec3.length_squared(v) + 2.0 * dvu + g0
if g0 > g1 do
b
else
a
end
end
end
@doc """
Gets the distance and two closest points for a pair of line segments.
Algorithm adapted from David Eberly's [c++ implementation](https://www.geometrictools.com/GTEngine/Include/Mathematics/GteDistSegmentSegment.h).
You really probably want to read 10.8.2 in _Geometric Tools for Computer Graphics_ to have a chance in hell of understanding this.
Even then, you'll still want to see Eberly's [writeup](https://www.geometrictools.com/Documentation/DistanceLine3Line3.pdf), since the algo here is different than the book.
## Examples
iex> # check mutual degeneracy
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> p = { {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}
iex> q = { {0.0, 2.0, 0.0}, {0.0, 2.0, 0.0}}
iex> GUtil.nearest_points_for_segments( p, q )
{2.0, {0.0, 0.0, 0.0}, {0.0, 2.0, 0.0}}
iex> # check p degeneracy
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> p = { {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}
iex> q = { {0.0, 2.0, 0.0}, {0.0, 4.0, 0.0}}
iex> GUtil.nearest_points_for_segments( p, q )
{2.0, {0.0, 0.0, 0.0}, {0.0, 2.0, 0.0}}
iex> # check q degeneracy
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> p = { {0.0, 0.0, 0.0}, {2.0, 0.0, 0.0}}
iex> q = { {0.0, 2.0, 0.0}, {0.0, 2.0, 0.0}}
iex> GUtil.nearest_points_for_segments( p, q )
{2.0, {0.0, 0.0, 0.0}, {0.0, 2.0, 0.0}}
iex> # check intersecting segments
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> GUtil.nearest_points_for_segments( { {-1.0,0.0,0.0}, {1.0,0.0,0.0}}, {{0.0,-1.0,0.0},{0.0, 1.0, 0.0}})
{0.0, {0.0,0.0,0.0}, {0.0, 0.0, 0.0}}
iex> # check for corner intersection
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> p = { {0.0, 0.0, 0.0}, {2.0, 0.0, 0.0}}
iex> q = { {2.0, 0.0, 0.0}, {2.0, 2.0, 0.0}}
iex> GUtil.nearest_points_for_segments( p, q )
{0.0, {2.0, 0.0, 0.0}, {2.0, 0.0, 0.0}}
iex> # check raised non-intersection
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> GUtil.nearest_points_for_segments( { {-1.0,0.0,0.0}, {1.0,0.0,0.0}}, {{0.0,-1.0,1.0},{0.0, 1.0, 1.0}})
{1.0, {0.0,0.0,0.0}, {0.0, 0.0, 1.0}}
iex> # check collinear non-intersection
iex> alias ElixirRigidPhysics.Geometry.Util, as: GUtil
iex> p = { {1.0, 1.0, 1.0}, {3.0, 3.0, 3.0}}
iex> q = { {-2.0, -2.0, -2.0}, {-5.0, -5.0, -5.0}}
iex> sqrt_27 = :math.sqrt(27)
iex> { sqrt_27, {1.0, 1.0, 1.0}, {-2.0, -2.0, -2.0}} == GUtil.nearest_points_for_segments( p, q )
true
"""
@spec nearest_points_for_segments({Vec3.t(), Vec3.t()}, {Vec3.t(), Vec3.t()}) ::
{float, Vec3.t(), Vec3.t()}
def nearest_points_for_segments({p0, p1}, {q0, q1}) do
p1_to_p0 = Vec3.subtract(p1, p0)
q1_to_q0 = Vec3.subtract(q1, q0)
p0_to_q0 = Vec3.subtract(p0, q0)
a = Vec3.dot(p1_to_p0, p1_to_p0)
b = Vec3.dot(p1_to_p0, q1_to_q0)
c = Vec3.dot(q1_to_q0, q1_to_q0)
d = Vec3.dot(p1_to_p0, p0_to_q0)
e = Vec3.dot(q1_to_q0, p0_to_q0)
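# R(s,t) = |p0 + s*(p1 - p0) - q0 - t*(q1 - q0)|^2 is quadratic in (s, t); the
# f and g values below are (half) the partial derivatives dR/ds and dR/dt
# evaluated at the corners of the unit square [0,1]x[0,1].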
f00 = d
f10 = f00 + a
f01 = f00 - b
f11 = f10 - b
g00 = -e
g10 = g00 - b
g01 = g00 + c
g11 = g10 + c
# check for segment degeneracy
{s, t} =
cond do
a > 0.0 and c > 0.0 ->
# both segments are valid
s_value_0 = get_clamped_root(a, f00, f10)
s_value_1 = get_clamped_root(a, f01, f11)
classify_0 =
cond do
s_value_0 <= 0.0 -> -1.0
s_value_0 >= 1.0 -> 1.0
true -> 0.0
end
classify_1 =
cond do
s_value_1 <= 0.0 -> -1.0
s_value_1 >= 1.0 -> 1.0
true -> 0.0
end
cond do
classify_0 == -1.0 and classify_1 == -1.0 ->
{0.0, get_clamped_root(c, g00, g01)}
classify_0 == 1.0 and classify_1 == 1.0 ->
{1.0, get_clamped_root(c, g10, g11)}
true ->
r_coeffs = {a, b, c, d, e}
g = {g00, g01, g10, g11}
{edges, ends} =
compute_intersection(
{s_value_0, s_value_1},
{classify_0, classify_1},
b,
f00,
f10
)
compute_minimum_parameters(edges, ends, r_coeffs, g)
end
# q segment is degenerate
a > 0.0 ->
{get_clamped_root(a, f00, f10), 0.0}
# p segment is degenerate
c > 0.0 ->
{0.0, get_clamped_root(c, g00, g01)}
# both segments are degenerate!
true ->
{0.0, 0.0}
end
p_nearest = Vec3.lerp(p0, p1, s)
q_nearest = Vec3.lerp(q0, q1, t)
distance = p_nearest
|> Vec3.subtract(q_nearest)
|> Vec3.length()
{distance, p_nearest, q_nearest}
end
@spec compute_intersection({number, number}, {number, number}, number, number, number) ::
{{number, number}, {number, number, number, number}}
defp compute_intersection({s_value_0, s_value_1}, {classify_0, classify_1}, b, f00, f10) do
cond do
classify_0 < 0.0 ->
edge_0 = 0
end_00 = 0.0
end_01 = if b == 0.0, do: 0.5, else: f00 / b
end_01 = if end_01 < 0.0 or end_01 > 1.0, do: 0.5, else: end_01
{edge_1, end_10, end_11} =
if classify_1 == 0 do
edge_1 = 3
end_10 = s_value_1
end_11 = 1.0
{edge_1, end_10, end_11}
else
edge_1 = 1
end_10 = 1.0
end_11 = if b == 0.0, do: 0.5, else: f10 / b
end_11 = if end_11 < 0.0 or end_11 > 1.0, do: 0.5, else: end_11
{edge_1, end_10, end_11}
end
{{edge_0, edge_1}, {end_00, end_01, end_10, end_11}}
classify_0 == 0.0 ->
edge_0 = 2
end_00 = s_value_0
end_01 = 0.0
{edge_1, end_10, end_11} =
cond do
classify_1 < 0.0 ->
edge_1 = 0
end_10 = 0.0
end_11 = if b == 0.0, do: 0.5, else: f00 / b
end_11 = if end_11 < 0.0 or end_11 > 1.0, do: 0.5, else: end_11
{edge_1, end_10, end_11}
classify_1 == 0.0 ->
{3, s_value_1, 1.0}
true ->
edge_1 = 1
end_10 = 1.0
end_11 = if b == 0.0, do: 0.5, else: f10 / b
end_11 = if end_11 < 0.0 or end_11 > 1.0, do: 0.5, else: end_11
{edge_1, end_10, end_11}
end
{{edge_0, edge_1}, {end_00, end_01, end_10, end_11}}
true ->
edge_0 = 1
end_00 = 1.0
end_01 = if b == 0.0, do: 0.5, else: f10 / b
end_01 = if end_01 < 0.0 or end_01 > 1.0, do: 0.5, else: end_01
{edge_1, end_10, end_11} =
if classify_1 == 0.0 do
{3, s_value_1, 1.0}
else
            edge_1 = 0
            end_10 = 0.0
            end_11 = if b == 0.0, do: 0.5, else: f00 / b
            end_11 = if end_11 < 0.0 or end_11 > 1.0, do: 0.5, else: end_11
            {edge_1, end_10, end_11}
end
{{edge_0, edge_1}, {end_00, end_01, end_10, end_11}}
end
end
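  # Walks the candidate segment found by compute_intersection/5 and picks the
  # (s, t) pair that minimizes the squared distance, using the signs of the
  # directional derivatives h0 and h1 at its two endpoints (per Eberly's method).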
@spec compute_minimum_parameters(
{number, number},
{number, number, number, number},
{number, number, number, number, number},
{number, number, number, number}
) :: {number, number}
defp compute_minimum_parameters(
{edge_0, edge_1},
{end_00, end_01, end_10, end_11},
{_a, b, c, _d, e},
{g00, g01, g10, g11}
) do
delta = end_11 - end_01
h0 = delta * (-b * end_00 + c * end_01 - e)
if h0 >= 0.0 do
case edge_0 do
0 -> {0.0, get_clamped_root(c, g00, g01)}
1 -> {1.0, get_clamped_root(c, g10, g11)}
_ -> {end_00, end_01}
end
else
h1 = delta * (-b * end_10 + c * end_11 - e)
if h1 <= 0.0 do
case edge_1 do
0 -> {0.0, get_clamped_root(c, g00, g01)}
1 -> {1.0, get_clamped_root(c, g10, g11)}
_ -> {end_10, end_11}
end
else
z = min(max(h0 / (h0 - h1), 0.0), 1.0)
omz = 1.0 - z
{omz * end_00 + z * end_10, omz * end_01 + z * end_11}
end
end
end
@doc """
Gets the root `z` of the linear function `h(z) = h(0) + sigma * z` on the interval [0,1], or the clamped root to the interval [0,1].
Requires `h(0)` and `h(1) = h(0) + sigma` to have opposite signs (implying root on real line). Thanks Eberly! :)
## Examples
iex> # test h0 >= 0
iex> ElixirRigidPhysics.Geometry.Util.get_clamped_root(42, 1, 0)
0.0
iex> # test h1 <= 0
iex> ElixirRigidPhysics.Geometry.Util.get_clamped_root(42, -1, -3)
1.0
iex> # test sigma being 0
iex> ElixirRigidPhysics.Geometry.Util.get_clamped_root(0.0, -1, 3)
0.5
iex> # test root over interval of [0,1]
iex> ElixirRigidPhysics.Geometry.Util.get_clamped_root(0.05, -1, 3)
0.5
iex> # test normal behavior
iex> ElixirRigidPhysics.Geometry.Util.get_clamped_root(1, -0.2, 1)
0.2
"""
@spec get_clamped_root(number(), number(), number()) :: number()
def get_clamped_root(sigma, h0, h1) do
cond do
h0 >= 0.0 ->
0.0
      h1 <= 0.0 ->
1.0
      # added because the original C++ code evaluates -h0/0 to infinity, whereas Elixir would raise
sigma == 0.0 ->
0.5
# Eberly suggests that this can be replaced with a bisection routine for `h(z)`, but that's slow
true ->
root = -h0 / sigma
if root > 1 do
0.5
else
root
end
end
end
end | lib/geometry/util.ex | 0.907458 | 0.697374 | util.ex | starcoder |
defmodule RDF.PropertyMap do
@moduledoc """
  A bidirectional mapping from atom names to `RDF.IRI`s of properties.
  These mappings can be used in all functions of the RDF data structures
to provide the meaning of the predicate terms in input statements or
define how the IRIs of predicates should be mapped with the value mapping
functions like `RDF.Description.values/2` etc.
The `:context` option of these functions either take a `RDF.PropertyMap` directly
or anything from which a `RDF.PropertyMap` can be created with `new/1`.
  Because the mapping is bidirectional, each term and each IRI can be used in
  only one mapping of a `RDF.PropertyMap`.
`RDF.PropertyMap` implements the `Enumerable` protocol and the `Access` behaviour.
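  ## Example

  A small illustrative session (the FOAF IRI is just an example):

      property_map = RDF.PropertyMap.new(name: "http://xmlns.com/foaf/0.1/name")
      RDF.PropertyMap.term(property_map, "http://xmlns.com/foaf/0.1/name")
      #=> :name
      RDF.PropertyMap.iri(property_map, :name)
      #=> RDF.IRI.new("http://xmlns.com/foaf/0.1/name")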
"""
defstruct iris: %{}, terms: %{}
alias RDF.IRI
import RDF.Guards
import RDF.Utils, only: [downcase?: 1]
@type coercible_term :: atom | String.t()
@type t :: %__MODULE__{
iris: %{atom => IRI.t()},
terms: %{IRI.t() => atom}
}
@type input :: t | map | keyword | RDF.Vocabulary.Namespace.t()
@behaviour Access
@doc """
Creates an empty `RDF.PropertyMap`.
"""
@spec new :: t
def new(), do: %__MODULE__{}
@doc """
Creates a new `RDF.PropertyMap` with initial mappings.
See `add/2` for the different forms in which mappings can be provided.
"""
@spec new(input) :: t
def new(%__MODULE__{} = initial), do: initial
def new(initial) do
{:ok, property_map} = new() |> add(initial)
property_map
end
@doc false
def from_opts(opts)
def from_opts(nil), do: nil
def from_opts(opts), do: if(property_map = Keyword.get(opts, :context), do: new(property_map))
@doc """
Returns the list of all terms in the given `property_map`.
"""
@spec terms(t) :: [atom]
def terms(%__MODULE__{iris: iris}), do: Map.keys(iris)
@doc """
Returns the list of all IRIs in the given `property_map`.
"""
@spec iris(t) :: [IRI.t()]
def iris(%__MODULE__{terms: terms}), do: Map.keys(terms)
@doc """
Returns the IRI for the given `term` in `property_map`.
Returns `nil`, when the given `term` is not present in `property_map`.
"""
@spec iri(t, coercible_term) :: IRI.t() | nil
def iri(%__MODULE__{} = property_map, term) do
Map.get(property_map.iris, coerce_term(term))
end
@doc """
  Returns the term for the given `iri` in `property_map`.
  Returns `nil`, when the given `iri` is not present in `property_map`.
"""
@spec term(t, IRI.coercible()) :: atom | nil
def term(%__MODULE__{} = property_map, iri) do
Map.get(property_map.terms, IRI.new(iri))
end
@doc """
Returns whether a mapping for the given `term` is defined in `property_map`.
"""
@spec iri_defined?(t, coercible_term) :: boolean
def iri_defined?(%__MODULE__{} = property_map, term) do
Map.has_key?(property_map.iris, coerce_term(term))
end
@doc """
Returns whether a mapping for the given `iri` is defined in `property_map`.
"""
@spec term_defined?(t, IRI.coercible()) :: boolean
def term_defined?(%__MODULE__{} = property_map, iri) do
Map.has_key?(property_map.terms, IRI.new(iri))
end
@impl Access
def fetch(%__MODULE__{} = property_map, term) do
Access.fetch(property_map.iris, coerce_term(term))
end
@doc """
Adds a property mapping between `term` and `iri` to `property_map`.
Unless another mapping for `term` or `iri` already exists, an `:ok` tuple
is returned, otherwise an `:error` tuple.
"""
@spec add(t, coercible_term, IRI.coercible()) :: {:ok, t} | {:error, String.t()}
def add(%__MODULE__{} = property_map, term, iri) do
do_set(property_map, :add, coerce_term(term), IRI.new(iri))
end
@doc """
Adds a set of property mappings to `property_map`.
The mappings can be passed in various ways:
- as keyword lists or maps where terms for the RDF properties can
be given as atoms or strings, while the property IRIs can be given as
`RDF.IRI`s or strings
  - a strict `RDF.Vocabulary.Namespace` from which all lowercased terms are added
    with their respective IRI; since an IRI can also occur only once in a
    `RDF.PropertyMap`, a defined alias term is preferred over an original term
- another `RDF.PropertyMap` from which all mappings are merged
Unless a mapping for any of the terms or IRIs in the `input` already exists,
an `:ok` tuple is returned, otherwise an `:error` tuple.
"""
@spec add(t, input) :: {:ok, t} | {:error, String.t()}
def add(%__MODULE__{} = property_map, vocab_namespace) when maybe_ns_term(vocab_namespace) do
cond do
not RDF.Vocabulary.Namespace.vocabulary_namespace?(vocab_namespace) ->
raise ArgumentError, "expected a vocabulary namespace, but got #{vocab_namespace}"
not apply(vocab_namespace, :__strict__, []) ->
raise ArgumentError,
"expected a strict vocabulary namespace, but #{vocab_namespace} is non-strict"
true ->
add(property_map, mapping_from_vocab_namespace(vocab_namespace))
end
end
def add(%__MODULE__{} = property_map, mappings) do
Enum.reduce_while(mappings, {:ok, property_map}, fn {term, iri}, {:ok, property_map} ->
with {:ok, property_map} <- add(property_map, term, iri) do
{:cont, {:ok, property_map}}
else
error -> {:halt, error}
end
end)
end
@doc """
Adds a set of property mappings to `property_map` and raises an error on conflicts.
See `add/2` for the different forms in which mappings can be provided.
"""
@spec add!(t, input) :: t
def add!(%__MODULE__{} = property_map, mappings) do
case add(property_map, mappings) do
{:ok, property_map} -> property_map
{:error, error} -> raise error
end
end
@doc """
Adds a property mapping between `term` and `iri` to `property_map` overwriting existing mappings.
"""
@spec put(t, coercible_term, IRI.coercible()) :: t
def put(%__MODULE__{} = property_map, term, iri) do
{:ok, added} = do_set(property_map, :put, coerce_term(term), IRI.new(iri))
added
end
@doc """
Adds a set of property mappings to `property_map` overwriting all existing mappings.
See `add/2` for the different forms in which mappings can be provided.
  Note that not only the mappings for the terms in the input `mappings` are
  overwritten, but also all mappings whose IRIs appear in the input `mappings`.
"""
@spec put(t, input) :: t
def put(%__MODULE__{} = property_map, mappings) do
Enum.reduce(mappings, property_map, fn {term, iri}, property_map ->
put(property_map, term, iri)
end)
end
defp do_set(property_map, op, term, iri) do
do_set(property_map, op, term, iri, Map.get(property_map.iris, term))
end
defp do_set(property_map, op, term, new_iri, old_iri) do
do_set(property_map, op, term, new_iri, old_iri, Map.get(property_map.terms, new_iri))
end
defp do_set(property_map, _, _, iri, iri, _), do: {:ok, property_map}
defp do_set(property_map, _, term, iri, nil, nil) do
{:ok,
%__MODULE__{
property_map
| iris: Map.put(property_map.iris, term, iri),
terms: Map.put(property_map.terms, iri, term)
}}
end
defp do_set(_context, :add, term, new_iri, old_iri, nil) do
{:error, "conflicting mapping for #{term}: #{new_iri}; already mapped to #{old_iri}"}
end
defp do_set(_context, :add, term, iri, _, old_term) do
{:error,
"conflicting mapping for #{term}: #{iri}; IRI already mapped to #{inspect(old_term)}"}
end
defp do_set(property_map, :put, term, new_iri, old_iri, nil) do
%__MODULE__{property_map | terms: Map.delete(property_map.terms, old_iri)}
|> do_set(:put, term, new_iri, nil, nil)
end
defp do_set(property_map, :put, term, new_iri, old_iri, old_term) do
%__MODULE__{property_map | iris: Map.delete(property_map.iris, old_term)}
|> do_set(:put, term, new_iri, old_iri, nil)
end
@doc """
Deletes the property mapping for `term` from `property_map`.
If no mapping for `term` exists, `property_map` is returned unchanged.
"""
@spec delete(t, coercible_term) :: t
def delete(%__MODULE__{} = property_map, term) do
term = coerce_term(term)
if iri = Map.get(property_map.iris, term) do
%__MODULE__{
property_map
| iris: Map.delete(property_map.iris, term),
terms: Map.delete(property_map.terms, iri)
}
else
property_map
end
end
@doc """
Drops the given `terms` from the `property_map`.
If `terms` contains terms that are not in `property_map`, they're simply ignored.
"""
@spec drop(t, [coercible_term]) :: t
def drop(%__MODULE__{} = property_map, terms) when is_list(terms) do
Enum.reduce(terms, property_map, fn term, property_map ->
delete(property_map, term)
end)
end
defp coerce_term(term) when is_atom(term), do: term
defp coerce_term(term) when is_binary(term), do: String.to_atom(term)
defp mapping_from_vocab_namespace(vocab_namespace) do
aliases = apply(vocab_namespace, :__term_aliases__, [])
apply(vocab_namespace, :__terms__, [])
|> Enum.filter(&downcase?/1)
|> Enum.map(fn term -> {term, apply(vocab_namespace, term, [])} end)
|> Enum.group_by(fn {_term, iri} -> iri end)
|> Map.new(fn
{_, [mapping]} ->
mapping
{_, mappings} ->
Enum.find(mappings, fn {term, _iri} -> term in aliases end) ||
raise "conflicting non-alias terms for IRI should not occur in a vocab namespace"
end)
end
@impl Access
  def pop(%__MODULE__{} = property_map, term) do
    term = coerce_term(term)
    case iri(property_map, term) do
      nil ->
        {nil, property_map}
      found_iri ->
        # delete/2 keeps both the :iris and :terms indexes consistent
        {found_iri, delete(property_map, term)}
    end
  end
@impl Access
def get_and_update(property_map, term, fun) do
term = coerce_term(term)
current = iri(property_map, term)
case fun.(current) do
{old_iri, new_iri} ->
{:ok, property_map} = do_set(property_map, :put, term, IRI.new(new_iri), IRI.new(old_iri))
{old_iri, property_map}
:pop ->
{current, delete(property_map, term)}
other ->
raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}"
end
end
defimpl Enumerable do
alias RDF.PropertyMap
def reduce(%PropertyMap{iris: iris}, acc, fun), do: Enumerable.reduce(iris, acc, fun)
def member?(%PropertyMap{iris: iris}, mapping), do: Enumerable.member?(iris, mapping)
def count(%PropertyMap{iris: iris}), do: Enumerable.count(iris)
def slice(%PropertyMap{iris: iris}), do: Enumerable.slice(iris)
end
defimpl Inspect do
import Inspect.Algebra
def inspect(property_map, opts) do
map = Map.to_list(property_map.iris)
open = color("%RDF.PropertyMap{", :map, opts)
sep = color(",", :map, opts)
close = color("}", :map, opts)
container_doc(open, map, close, opts, &to_map(&1, &2, color(" <=> ", :map, opts)),
separator: sep,
break: :strict
)
end
defp to_map({key, value}, opts, sep) do
concat(concat(to_doc(key, opts), sep), to_doc(value, opts))
end
end
end | lib/rdf/property_map.ex | 0.928409 | 0.785679 | property_map.ex | starcoder |
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it. If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
defmodule BotArmy.Actions do
@moduledoc """
Generic Actions.
Actions are functions that take the bot's context and any supplied arguments,
perform some useful side effects, and then return the outcome. The context is
always passed as the first argument.
Valid outcomes are: `:succeed`, `:fail`, `:continue`, `:done` or `{:error,
reason}`.
  `:succeed`, `:fail`, and `:continue` can also be in the form of `{:succeed, key:
  "value"}` if you want to save or update the context.
"""
require Logger
@typedoc """
Actions must return one of these outcomes.
"""
@type outcome ::
:succeed
| :fail
| :continue
| :done
| {:error, any()}
| {:succeed, keyword()}
| {:fail, keyword()}
| {:continue, keyword()}
@doc """
A semantic helper to define actions in your behavior tree.
Node.sequence([
...
action(BotArmy.Actions, :wait, [5]),
...
action(BotArmy.Actions, :done)
])
"""
def action(module, fun, args \\ []) do
{module, fun, args}
end
@doc """
Makes the calling process wait for the given number of seconds
"""
def wait(_context, s \\ 5) do
Process.sleep(trunc(1000 * s))
:succeed
end
@doc """
Makes the calling process wait for a random number of seconds in the range defined
by the given integers min and max
"""
def wait(_context, min, max) when is_integer(min) and is_integer(max) do
Process.sleep(1000 * Enum.random(min..max))
:succeed
end
@doc """
Given a rate as a percentage, this will succeed that percent of the time, and fail
otherwise.
For example `succeed_rate(context, 0.25)` will succeed on average 1 our of 4 tries.
"""
def succeed_rate(_context, rate) when is_float(rate) and rate < 1 and rate > 0 do
if :rand.uniform() <= rate,
do: :succeed,
else: :fail
end
@doc """
  This will stop the bot from running (by default bots "loop" continuously through
  their behavior trees).
"""
def done(_), do: :done
@doc """
Signal that this bot has errored, causing the bot's process to die with the given
reason.
"""
def error(_, reason), do: {:error, reason}
@doc """
A helpful way to "tap" the flow of the behavior tree for debugging.
"""
def log(_context, message) do
Logger.info(message)
:succeed
end
end | lib/actions.ex | 0.68215 | 0.448426 | actions.ex | starcoder |
defmodule GenSpoxy.Periodic.TasksExecutor do
@moduledoc """
  A behaviour for running prerender tasks periodically.
  When we execute a `spoxy` request and have a cache miss returning stale data,
  we may choose to return the stale data and queue a background task.
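
  A module adopting this behaviour might look like this (illustrative sketch;
  `MyApp.PrerenderExecutor` and the task handling shown are hypothetical):

      defmodule MyApp.PrerenderExecutor do
        use GenSpoxy.Periodic.TasksExecutor, periodic_sampling_interval: 5_000

        def execute_tasks!(req_key, req_tasks) do
          # re-run the prerender once for all tasks queued under this request key
          Enum.each(req_tasks, fn _task -> :ok end)
          :ok
        end
      end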
"""
  @callback execute_tasks!(req_key :: String.t(), req_tasks :: [term]) :: :ok
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
use GenServer
use GenSpoxy.Partitionable
alias GenSpoxy.Defaults
@sampling_interval Keyword.get(
opts,
:periodic_sampling_interval,
Defaults.periodic_sampling_interval()
)
default_partitions =
Keyword.get(
opts,
:total_partitions,
Defaults.total_partitions()
)
@total_partitions Keyword.get(
opts,
:periodic_total_partitions,
default_partitions
)
def start_link(opts) do
GenServer.start_link(__MODULE__, :ok, opts)
end
def enqueue_task(req_key, task) do
server = lookup_req_server(req_key)
GenServer.cast(server, {:enqueue_task, req_key, task})
end
# callbacks
@impl true
def init(_opts) do
Process.send_after(self(), :execute_tasks, @sampling_interval)
{:ok, %{}}
end
@impl true
def handle_cast({:enqueue_task, req_key, task}, state) do
req_tasks = Map.get(state, req_key, [])
new_state = Map.put(state, req_key, [task | req_tasks])
{:noreply, new_state}
end
@impl true
def handle_info(:execute_tasks, state) do
Process.send_after(self(), :execute_tasks, @sampling_interval)
Enum.each(state, fn {req_key, req_tasks} ->
Task.start(fn ->
execute_tasks!(req_key, req_tasks)
end)
end)
{:noreply, %{}}
end
@impl true
def total_partitions do
@total_partitions
end
@impl true
def calc_req_partition(req_key) do
1 + :erlang.phash2(req_key, @total_partitions)
end
defp lookup_req_server(req_key) do
partition = calc_req_partition(req_key)
partition_server(partition)
end
end
end
end | lib/periodic/tasks_executor.ex | 0.844265 | 0.417568 | tasks_executor.ex | starcoder |
defmodule TheElixir.Components.Journal do
@moduledoc """
A place to keep quests in a handy and organized way!
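
  ## Example (illustrative)

      {:ok, _pid} = TheElixir.Components.Journal.start_link(:my_journal)
      {:ok, quest} = TheElixir.Components.Journal.add(:my_journal, "intro", %{name: "Intro"})
      TheElixir.Components.Journal.lookup(:my_journal, "intro")
      #=> {:ok, %{name: "Intro"}}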
"""
use GenServer
# Client API
def start_link(name \\ :journal) do
GenServer.start_link(__MODULE__, name, name: name)
end
@doc """
Lookup a quest with `quest_name` in `journal` if it exists
Returns `{:ok, quest}` on success, `:error` otherwise
"""
def lookup(journal, quest_name) do
case :ets.lookup(journal, quest_name) do
[{^quest_name, quest}] -> {:ok, quest}
[] -> :error
end
end
@doc """
Adds a `quest` with `quest_name` to `journal`
  Returns `{:ok, quest}`, where `quest` is the existing quest if `quest_name` was
  already present. Implemented as a synchronous call (rather than a cast) to avoid
  race conditions between concurrent additions and lookups.
"""
def add(journal, quest_name, quest) do
GenServer.call(journal, {:add, quest_name, quest})
end
@doc """
Removes a quest with `quest_name` from `journal`
"""
def delete(journal, quest_name) do
GenServer.cast(journal, {:delete, quest_name})
end
@doc """
Get a list of quest names in `journal`
"""
def get(journal) do
GenServer.call(journal, {:get, []})
end
@doc """
Stop the journal process
"""
def stop(journal) do
GenServer.stop(journal)
end
# Server callbacks
def init(table) do
journal = :ets.new(table, [:named_table, read_concurrency: true])
{:ok, journal}
end
def handle_call({:add, quest_name, quest}, _from, journal) do
case lookup(journal, quest_name) do
{:ok, quest} ->
{:reply, {:ok, quest}, journal}
:error ->
:ets.insert(journal, {quest_name, quest})
{:reply, {:ok, quest}, journal}
end
end
def handle_call({:get, []}, _from, journal) do
quest_names = :ets.match(journal, {:"$1", :_})
quest_names = List.flatten(quest_names)
{:reply, quest_names, journal}
end
def handle_cast({:delete, quest_name}, journal) do
case lookup(journal, quest_name) do
{:ok, _} ->
:ets.delete(journal, quest_name)
{:noreply, journal}
:error ->
{:noreply, journal}
end
end
end | lib/the_elixir/components/journal/journal.ex | 0.640299 | 0.477432 | journal.ex | starcoder |
defmodule BinaryDiagnostic do
@doc """
Part one: Find the power consumption of a submarine
"""
def power_consumption(path \\ "./puzzle_input.txt") do
diagnostic_report = parse(path)
num_column = String.length(List.first(diagnostic_report))
diagnostic_report = diagnostic_report
|> Enum.join()
|> String.split("", trim: true)
transformed_report = transform(num_column, diagnostic_report, num_column)
gamma_rate = Enum.map(transformed_report, fn r ->
%{"0" => zero_count, "1" => one_count} = Enum.frequencies(r)
if zero_count > one_count, do: "0", else: "1"
end)
|> Enum.reverse()
epsilon_rate = Enum.map(gamma_rate, fn x -> if x == "1", do: "0", else: "1" end)
to_decimal(gamma_rate) * to_decimal(epsilon_rate)
end
@doc """
Part two: Find the life support rating
"""
def life_support_rating(path \\ "./puzzle_input.txt") do
diagnostic_report = parse(path)
num_column = String.length(List.first(diagnostic_report))
oxygen_generator_rating = filter_rating(:>=, diagnostic_report, num_column)
co2_scrubber_rating = filter_rating(:<, diagnostic_report, num_column)
to_decimal(oxygen_generator_rating) * to_decimal(co2_scrubber_rating)
end
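  # Repeatedly keeps only the numbers whose bit at the current column matches the
  # bit criteria: :>= keeps the most common bit (ties resolve to "1", as for the
  # oxygen generator rating), :< keeps the least common bit (ties resolve to "0",
  # as for the CO2 scrubber rating), until a single number remains.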
defp filter_rating(bit_criteria, diagnostic_report, num_column, index \\ 0)
defp filter_rating(_, [_ | [] ] = diagnostic_report, _, _), do: diagnostic_report
defp filter_rating(bit_criteria, diagnostic_report, num_column, index) do
flat_report = diagnostic_report
|> Enum.join()
|> String.split("", trim: true)
transformed_report = transform(num_column, flat_report, num_column) |> Enum.reverse()
bit_row = Enum.at(transformed_report, index)
%{"0" => zero_count, "1" => one_count} = Enum.frequencies(bit_row)
bit_condition = apply(Kernel, bit_criteria, [one_count, zero_count])
bit = if bit_condition, do: "1", else: "0"
diagnostic_report = Enum.filter(diagnostic_report, fn n ->
String.at(n, index) == bit
end)
index = index + 1
filter_rating(bit_criteria, diagnostic_report, num_column, index)
end
defp parse(path) do
{:ok, input} = File.read(path)
input
|> String.split("\n")
end
defp transform(num_column, report, index, acc \\ [])
defp transform(_num_column, _report, 0, acc), do: acc
defp transform(num_column, report, index, acc) do
row = Enum.take_every(report, num_column)
[_ | tail ] = report
index = index - 1
transform(num_column, tail, index, [row | acc])
end
defp to_decimal(binary) do
binary = Enum.join(binary)
{decimal, _} = Integer.parse(binary, 2)
decimal
end
end
IO.inspect BinaryDiagnostic.power_consumption()
IO.inspect BinaryDiagnostic.life_support_rating() | 2021/elixir/day-3-binary-diagnostic/binary_diagnostic.ex | 0.700075 | 0.625667 | binary_diagnostic.ex | starcoder |
defmodule Size do
defstruct height: 0, width: 0
def new({x, y}), do: %Size{height: y, width: x}
end
defmodule Canvas do
defstruct [:pixels, :size]
def new(%Size{height: height, width: width} = size, color \\ Color.named(:black)) do
%Canvas{
pixels:
:array.new(
size: height * width,
default: Color.to_list(color),
fixed: true
),
size: size
}
end
def put(canvas, coords, color \\ Color.named(:white))
def put(%Canvas{size: %{width: width, height: height} = size}, {x, y}, _)
when x >= width or y >= height do
raise Canvas.OutOfBoundError, coords: {x, y}, size: size
end
def put(%Canvas{pixels: pixels, size: size}, {x, y}, color) do
%Canvas{
pixels: :array.set(x + size.width * y, Color.to_list(color), pixels),
size: size
}
end
def pixel_data(canvas) do
:array.to_list(canvas.pixels)
end
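  # Draws the line by interpolating along the longer axis (a simple DDA-style
  # approach): when the line is steep, x and y are swapped so every step lands on
  # a new pixel, and swapped back before plotting.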
def line(canvas, a, b, color \\ Color.named(:white))
def line(canvas, {x0, y0}, {x1, y1}, _) when x0 == x1 and y0 == y1, do: canvas
def line(canvas, {x0, y0}, {x1, y1}, color) do
{a, b, c, d, flip} =
if abs(x0 - x1) > abs(y0 - y1) do
{x0, x1, y0, y1, false}
else
{y0, y1, x0, x1, true}
end
delta = b - a
Enum.reduce(a..b, canvas, fn x, canvas ->
t = (x - a) / delta
y = trunc(c * (1 - t) + d * t)
put(
canvas,
if not flip do
{x, y}
else
{y, x}
end,
color
)
end)
end
def flip(%Canvas{pixels: pixels} = canvas) do
%Size{width: width} = canvas.size
pixels =
pixels
|> :array.to_list()
|> Stream.chunk_every(width)
|> Enum.reverse()
|> (fn l -> :array.from_list(l) end).()
%Canvas{canvas | pixels: pixels}
end
defmodule OutOfBoundError do
defexception [:message]
@impl true
def exception(coords: coords, size: size) do
msg = "Out of bound (#{inspect(size)}): #{inspect(coords)}"
%OutOfBoundError{message: msg}
end
end
end
defimpl Inspect, for: Canvas do
def inspect(canvas, _) do
"%Canvas{pixels: PIXEL_DATA, size: #{inspect(canvas.size)}}"
end
end | lib/canvas.ex | 0.747892 | 0.805785 | canvas.ex | starcoder |
defmodule FloUI.Theme do
@moduledoc """
  Basic theme sets for FloUI
``` elixir
:base,
:dark,
:light,
:primary,
:scrollbar,
:secondary,
:success,
:danger,
:warning,
:info,
:text
```
Pick a preset
``` elixir
FloUI.Theme.preset(:primary)
```
"""
alias Scenic.Primitive.Style.Paint.Color
  @flo_base %{
    text: :white,
    active_text: :black,
    background: {64, 64, 64},
    border: :light_grey,
    active: :steel_blue,
    thumb: :steel_blue,
    focus: :steel_blue
  }
@flo_dark Map.merge(@flo_base, %{background: :black})
@flo_light Map.merge(@flo_base, %{text: :black, active_text: :white, background: :gainsboro})
# @flo_primary Map.merge(@flo_base, %{
# text: :black,
# background: :grey,
# border: {84, 84, 84},
# active: {40, 40, 40}
# })
@scrollbar Map.merge(@flo_base, %{
text: :black,
active_text: :black,
background: :grey,
border: {84, 84, 84},
active: {40, 40, 40}
})
# specialty themes
@primary Map.merge(@flo_base, %{text: :white, active_text: :black, background: :steel_blue, active: {8, 86, 136}})
@secondary Map.merge(@flo_base, %{background: {111, 117, 125}, active_text: :black, active: {86, 90, 95}})
@success Map.merge(@flo_base, %{background: {99, 163, 74}, active_text: :black, active: {74, 123, 56}})
@danger Map.merge(@flo_base, %{background: {191, 72, 71}, active_text: :black, active: {164, 54, 51}})
@warning Map.merge(@flo_base, %{background: {239, 196, 42}, active_text: :black, active: {197, 160, 31}})
@info Map.merge(@flo_base, %{background: {94, 159, 183}, active_text: :black, active: {70, 119, 138}})
@text Map.merge(@flo_base, %{text: {72, 122, 252}, active_text: :black, background: :clear, active: :clear})
@themes %{
base: @flo_base,
dark: @flo_dark,
light: @flo_light,
scrollbar: @scrollbar,
primary: @primary,
secondary: @secondary,
success: @success,
danger: @danger,
warning: @warning,
info: @info,
text: @text
}
# ============================================================================
# data verification and serialization
# --------------------------------------------------------
@doc false
def info(data),
do: """
#{IO.ANSI.red()}#{__MODULE__} data must either a preset theme or a map of named colors
#{IO.ANSI.yellow()}Received: #{inspect(data)}
The predefined themes are:
:dark, :light, :primary, :secondary, :success, :danger, :warning, :info, :text
If you pass in a map of colors, the common ones used in the controls are:
:text, :background, :border, :active, :thumb, :focus
#{IO.ANSI.default_color()}
"""
# --------------------------------------------------------
@doc false
def validate(name) when is_atom(name), do: Map.has_key?(@themes, name)
def validate(custom) when is_map(custom) do
Enum.all?(custom, fn {_, color} -> Color.verify(color) end)
end
def validate(_), do: false
# --------------------------------------------------------
@doc false
def normalize(theme) when is_atom(theme), do: Map.get(@themes, theme)
def normalize(theme) when is_map(theme), do: theme
# --------------------------------------------------------
@doc false
def default(), do: Map.get(@themes, :base)
# --------------------------------------------------------
@doc false
def preset(theme), do: Map.get(@themes, theme)
end | lib/theme.ex | 0.746231 | 0.540257 | theme.ex | starcoder |
defmodule Redix.PubSub do
@moduledoc """
Interface for the Redis PubSub functionality.
The rest of this documentation will assume the reader knows how PubSub works
in Redis and knows the meaning of the following Redis commands:
* `SUBSCRIBE` and `UNSUBSCRIBE`
* `PSUBSCRIBE` and `PUNSUBSCRIBE`
* `PUBLISH`
## Usage
Each `Redix.PubSub` process is able to subcribe to/unsubscribe from multiple
Redis channels, and is able to handle multiple Elixir processes subscribing
each to different channels.
A `Redix.PubSub` process can be started via `Redix.PubSub.start_link/2`; such
a process holds a single TCP connection to the Redis server.
`Redix.PubSub` has a message-oriented API: all subscribe/unsubscribe
operations are *fire-and-forget* operations (casts in `GenServer`-speak) that
always return `:ok` to the caller, whether the operation has been processed by
`Redix.PubSub` or not. When `Redix.PubSub` registers the
subscription/unsubscription, it will send a confirmation message to the
subscribed/unsubscribed process. For example:
{:ok, pubsub} = Redix.PubSub.start_link()
Redix.PubSub.subscribe(pubsub, "my_channel", self())
#=> :ok
receive do msg -> msg end
#=> {:redix_pubsub, #PID<...>, :subscribed, %{channel: "my_channel"}}
After a subscription, messages published to a channel are delivered to all
Elixir processes subscribed to that channel via `Redix.PubSub`:
# Someone publishes "hello" on "my_channel"
receive do msg -> msg end
#=> {:redix_pubsub, #PID<...>, :message, %{channel: "my_channel", payload: "hello"}}
## Reconnections
`Redix.PubSub` tries to be resilient to failures: when the connection with
Redis is interrupted (for whatever reason), it will try to reconnect to the
Redis server. When a disconnection happens, `Redix.PubSub` will notify all
clients subscribed to all channels with a `{:redix_pubsub, pid, :disconnected,
_}` message (more on the format of messages below). When the connection goes
back up, `Redix.PubSub` takes care of actually re-subscribing to the
appropriate channels on the Redis server and subscribers are notified with a
`{:redix_pubsub, pid, :subscribed, _}` message, the same as when a client
subscribes to a channel/pattern.
Note that if `exit_on_disconnection: true` is passed to
`Redix.PubSub.start_link/2`, the `Redix.PubSub` process will exit and not send
any `:disconnected` messages to subscribed clients.
## Message format
Most of the communication with a PubSub connection is done via (Elixir)
messages: the subscribers of these messages will be the processes specified at
subscription time (in `Redix.PubSub.subscribe/3` or `Redix.PubSub.psubscribe/3`).
All `Redix.PubSub` messages have the same form: they're a four-element tuple
that looks like this:
{:redix_pubsub, pid, type, properties}
where:
* `pid` is the pid of the `Redix.PubSub` process that sent this message
* `type` is the type of this message (e.g., `:subscribed` for subscription
confirmations, `:message` for PubSub messages)
* `properties` is a map of data related to that that varies based on `type`
Given this format, it's easy to match on all Redix PubSub messages by just
matching on `{:redix_pubsub, ^pid, _, _}`.
#### List of possible message types and properties
The following is a list of possible message types alongside the properties
that each can have.
* `:subscribe` or `:psubscribe` messages - they're sent as confirmation of
subscription to a channel or pattern (respectively) (via
`Redix.PubSub.subscribe/3` or `Redix.PubSub.psubscribe/3` or after a
disconnection and reconnection). One `:subscribe`/`:psubscribe` message is
received for every channel a process subscribed
to. `:subscribe`/`:psubscribe` messages have the following properties:
* `:channel` or `:pattern` - the channel/pattern the process has been
subscribed to
* `:unsubscribe` or `:punsubscribe` messages - they're sent as confirmation
of unsubscription to a channel or pattern (respectively) (via
`Redix.PubSub.unsubscribe/3` or `Redix.PubSub.punsubscribe/3`). One
`:unsubscribe`/`:punsubscribe` message is received for every channel a
process unsubscribes from. `:unsubscribe`/`:punsubscribe` messages have
the following properties:
* `:channel` or `:pattern` - the channel/pattern the process has
unsubscribed from
* `:message` messages - they're sent to subscribers to a given channel when
a message is published on that channel. `:message` messages have the
following properties:
* `:channel` - the channel this message was published on
* `:payload` - the contents of this message
* `:pmessage` messages - they're sent to subscribers to a given pattern when
a message is published on a channel that matches that pattern. `:pmessage`
messages have the following properties:
* `:channel` - the channel this message was published on
* `:pattern` - the original pattern that matched the channel
* `:payload` - the contents of this message
* `:disconnected` messages - they're sent to all subscribers to all
channels/patterns when the connection to Redis is interrupted.
`:disconnected` messages have the following properties:
* `:reason` - the reason for the disconnection (e.g., `:tcp_closed`)
## Examples
This is an example of a workflow using the PubSub functionality; it uses
[Redix](https://github.com/whatyouhide/redix) as a Redis client for publishing
messages.
{:ok, pubsub} = Redix.PubSub.start_link()
{:ok, client} = Redix.start_link()
Redix.PubSub.subscribe(pubsub, "my_channel", self())
#=> :ok
# We wait for the subscription confirmation
receive do
{:redix_pubsub, ^pubsub, :subscribed, %{channel: "my_channel"}} -> :ok
end
      Redix.command!(client, ~w(PUBLISH my_channel hello))
receive do
{:redix_pubsub, ^pubsub, :message, %{channel: "my_channel"} = properties} ->
properties.payload
end
#=> "hello"
Redix.PubSub.unsubscribe(pubsub, "foo", self())
#=> :ok
# We wait for the unsubscription confirmation
receive do
{:redix_pubsub, ^pubsub, :unsubscribed, _} -> :ok
end
"""
@type subscriber :: pid | port | atom | {atom, node}
alias Redix.Utils
@default_timeout 5_000
@doc """
Starts a PubSub connection to Redis.
This function returns `{:ok, pid}` if the PubSub process is started successfully.
The actual TCP connection to the Redis server may happen either synchronously,
before `start_link/2` returns, or asynchronously: this behaviour is decided by
the `:sync_connect` option (see below).
This function accepts two arguments: the options to connect to the Redis
server (like host, port, and so on) and the options to manage the connection
and the resiliency. The Redis options can be specified as a keyword list or as
a URI.
## Redis options
### URI
In case `uri_or_redis_opts` is a Redis URI, it must be in the form:
redis://[:password@]host[:port][/db]
Here are some examples of valid URIs:
redis://localhost
redis://:secret@localhost:6397
redis://example.com:6380/1
  Usernames before the password are ignored, so these two URIs are
equivalent:
redis://:secret@localhost
redis://myuser:secret@localhost
The only mandatory thing when using URIs is the host. All other elements
(password, port, database) are optional and their default value can be found
in the "Options" section below.
### Options
The following options can be used to specify the parameters used to connect to
Redis (instead of a URI as described above):
* `:host` - (string) the host where the Redis server is running. Defaults to
`"localhost"`.
* `:port` - (integer) the port on which the Redis server is
running. Defaults to `6379`.
* `:password` - (string) the password used to connect to Redis. Defaults to
`nil`, meaning no password is used. When this option is provided, all Redix
does is issue an `AUTH` command to Redis in order to authenticate.
* `:database` - (integer or string) the database to connect to. Defaults to
`nil`, meaning don't connect to any database (Redis connects to database
`0` by default). When this option is provided, all Redix does is issue a
`SELECT` command to Redis in order to select the given database.
## Connection options
`connection_opts` is a list of options used to manage the connection. These
are the Redix-specific options that can be used:
* `:socket_opts` - (list of options) this option specifies a list of options
that are passed to `:gen_tcp.connect/4` when connecting to the Redis
server. Some socket options (like `:active` or `:binary`) will be
overridden by `Redix.PubSub` so that it functions properly. Defaults to
`[]`.
* `:sync_connect` - (boolean) decides whether Redix should initiate the TCP
connection to the Redis server *before* or *after* returning from
`start_link/2`. This option also changes some reconnection semantics; read
the ["Reconnections" page](http://hexdocs.pm/redix/reconnections.html) in
the docs for `Redix` for more information.
* `:backoff_initial` - (integer) the initial backoff time (in milliseconds),
which is the time that will be waited by the `Redix.PubSub` process before
attempting to reconnect to Redis after a disconnection or failed first
connection. See the ["Reconnections"
page](http://hexdocs.pm/redix/reconnections.html) in the docs for `Redix`
for more information.
* `:backoff_max` - (integer) the maximum length (in milliseconds) of the
time interval used between reconnection attempts. See the ["Reconnections"
page](http://hexdocs.pm/redix/reconnections.html) in the docs for `Redix`
for more information.
* `:exit_on_disconnection` - (boolean) if `true`, the Redix server will exit
if it fails to connect or disconnects from Redis. Note that setting this
option to `true` means that the `:backoff_initial` and `:backoff_max` options
will be ignored. Defaults to `false`.
* `:log` - (keyword list) a keyword list of `{action, level}` where `level` is
the log level to use to log `action`. The possible actions and their default
values are:
* `:disconnection` (defaults to `:error`) - logged when the connection to
Redis is lost
* `:failed_connection` (defaults to `:error`) - logged when Redix can't
establish a connection to Redis
* `:reconnection` (defaults to `:info`) - logged when Redix manages to
reconnect to Redis after the connection was lost
In addition to these options, all options accepted by
`Connection.start_link/3` (and thus `GenServer.start_link/3`) are forwarded to
it. For example, a `Redix.PubSub` process can be registered with a name by using the
`:name` option:
Redix.PubSub.start_link([], name: :redix_pubsub)
Process.whereis(:redix_pubsub)
#=> #PID<...>
## Examples
iex> Redix.PubSub.start_link()
{:ok, #PID<...>}
iex> Redix.PubSub.start_link(host: "example.com", port: 9999, password: "<PASSWORD>")
{:ok, #PID<...>}
iex> Redix.PubSub.start_link([database: 3], [name: :redix_3])
{:ok, #PID<...>}
"""
@spec start_link(binary | Keyword.t, Keyword.t) :: GenServer.on_start
def start_link(uri_or_redis_opts \\ [], connection_opts \\ [])
def start_link(uri, other_opts) when is_binary(uri) and is_list(other_opts) do
uri |> Redix.URI.opts_from_uri() |> start_link(other_opts)
end
def start_link(redis_opts, other_opts) do
{redix_opts, connection_opts} = Utils.sanitize_starting_opts(redis_opts, other_opts)
Connection.start_link(Redix.PubSub.Connection, redix_opts, connection_opts)
end
@doc """
Stops the given PubSub process.
This function is asynchronous (*fire and forget*): it returns `:ok` as soon as
it's called and performs the closing of the connection after that.
## Examples
iex> Redix.PubSub.stop(conn)
:ok
"""
@spec stop(GenServer.server) :: :ok
def stop(conn) do
Connection.cast(conn, :stop)
end
@doc """
Subscribes `subscriber` to the given channel or list of channels.
Subscribes `subscriber` (which can be anything that can be passed to `send/2`)
to `channels`, which can be a single channel or a list of channels.
For each of the channels in `channels` which `subscriber` successfully
subscribes to, a message will be sent to `subscriber` with this form:
{:redix_pubsub, pid, :subscribed, %{channel: channel}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
      iex> Redix.PubSub.subscribe(conn, ["foo", "bar"], self())
:ok
iex> flush()
{:redix_pubsub, #PID<...>, :subscribed, %{channel: "foo"}}
{:redix_pubsub, #PID<...>, :subscribed, %{channel: "bar"}}
:ok
"""
@spec subscribe(GenServer.server, String.t | [String.t], subscriber) :: :ok
def subscribe(conn, channels, subscriber) do
Connection.cast(conn, {:subscribe, List.wrap(channels), subscriber})
end
@doc """
Subscribes `subscriber` to the given pattern or list of patterns.
Works like `subscribe/3` but subscribing `subscriber` to a pattern (or list of
patterns) instead of regular channels.
Upon successful subscription to each of the `patterns`, a message will be sent
to `subscriber` with the following form:
{:redix_pubsub, pid, :psubscribed, %{pattern: pattern}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
      iex> Redix.PubSub.psubscribe(conn, "ba*", self())
:ok
iex> flush()
      {:redix_pubsub, #PID<...>, :psubscribed, %{pattern: "ba*"}}
:ok
"""
@spec psubscribe(GenServer.server, String.t | [String.t], subscriber) :: :ok
def psubscribe(conn, patterns, subscriber) do
Connection.cast(conn, {:psubscribe, List.wrap(patterns), subscriber})
end
@doc """
Unsubscribes `subscriber` from the given channel or list of channels.
This function basically "undoes" what `subscribe/3` does: it unsubscribes
`subscriber` from the given channel or list of channels.
Upon successful unsubscription from each of the `channels`, a message will be
sent to `subscriber` with the following form:
{:redix_pubsub, pid, :unsubscribed, %{channel: channel}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
      iex> Redix.PubSub.unsubscribe(conn, ["foo", "bar"], self())
:ok
iex> flush()
{:redix_pubsub, #PID<...>, :unsubscribed, %{channel: "foo"}}
{:redix_pubsub, #PID<...>, :unsubscribed, %{channel: "bar"}}
:ok
"""
@spec unsubscribe(GenServer.server, String.t | [String.t], subscriber) :: :ok
def unsubscribe(conn, channels, subscriber) do
Connection.cast(conn, {:unsubscribe, List.wrap(channels), subscriber})
end
@doc """
Unsubscribes `subscriber` from the given pattern or list of patterns.
This function basically "undoes" what `psubscribe/3` does: it unsubscribes
`subscriber` from the given pattern or list of patterns.
Upon successful unsubscription from each of the `patterns`, a message will be
sent to `subscriber` with the following form:
{:redix_pubsub, pid, :punsubscribed, %{pattern: pattern}}
See the documentation for `Redix.PubSub` for more information about the format
of messages.
## Examples
      iex> Redix.PubSub.punsubscribe(conn, "foo_*", self())
:ok
iex> flush()
{:redix_pubsub, #PID<...>, :punsubscribed, %{pattern: "foo_*"}}
:ok
"""
@spec punsubscribe(GenServer.server, String.t | [String.t], subscriber) :: :ok
def punsubscribe(conn, patterns, subscriber) do
Connection.cast(conn, {:punsubscribe, List.wrap(patterns), subscriber})
end
end | lib/redix/pubsub.ex | 0.893007 | 0.574454 | pubsub.ex | starcoder |
defmodule Sparql do
@moduledoc """
## Overview
This module offers some functionality to parse SPARQL queries. To do this I
  have built a parser with the :leex and :yecc Erlang libraries.
## :leex and :yecc
You can find the source files as well as the compiled erlang files
for this under ../parser-generator/
  Since this uses raw Erlang libraries under the hood, all queries that get sent
  are assumed to be single-quoted strings
## TODOs
TODO add a function to remove all graph statements
TODO add a function to override all graph statements with a set of graph statements
"""
@doc """
Parses a SPARQL query that gets passed in a single quoted string
(see erlang documentation on the issues with double quoted strings)
## Examples
iex> Sparql.parse('SELECT ?s ?p ?o WHERE { ?s ?p ?o }')
{:ok,
{:sparql,
{:select,
{:"select-clause", {:"var-list", [variable: :s, variable: :p, variable: :o]}},
{:where,
[
{:"same-subject-path", {:subject, {:variable, :s}},
{:"predicate-list",
[
{{:predicate, {:variable, :p}},
{:"object-list", [object: {:variable, :o}]}}
]}}
]}}
}
}
"""
def parse(raw_query) do
raw_query |> tokenize |> do_parse
end
defp tokenize(raw_query) do
:"sparql-tokenizer".string(raw_query)
end
defp do_parse({:ok, tokenized_query, _}) do
:"sparql-parser".parse(tokenized_query)
end
defp do_parse({:error, _, _} = error_message) do
error_message
end
@doc """
Converts all same-subject-paths into simple subject paths
in SPARQL itself this is the equivalent of converting.
```
?s ?p ?o ; ?p2 ?o2 , ?o3 .
```
to
```
?s ?p ?o .
?s ?p2 ?o2 .
?s ?p2 ?o3 .
```
## Examples
iex> Sparql.convert_to_simple_triples({:"same-subject-path", {:subject, {:variable, :s}},
iex> {:"predicate-list",
iex> [
iex> {{:predicate, {:variable, :p}},
iex> {:"object-list", [object: {:variable, :o}]}}
iex> ]}})
[
{{:subject, {:variable, :s}}, {:predicate, {:variable, :p}}, {:object, {:object, {:variable, :o}}}}
]
"""
def convert_to_simple_triples({:"same-subject-path", {:subject, subject}, {:"predicate-list", predicate_list}})do
convert_to_simple_triples(subject, predicate_list)
end
defp convert_to_simple_triples(subject, predicate_list) do
predicate_list
|> Enum.map(fn({{:predicate, predicate}, {:"object-list", object_list}}) ->
convert_to_simple_triples(subject, predicate, object_list) end)
    |> Enum.concat()
end
defp convert_to_simple_triples(subject, predicate, object_list) do
Enum.map(object_list, fn(object) ->
{{:subject, subject}, {:predicate, predicate}, {:object, object}} end)
end
end | lib/sparql.ex | 0.695958 | 0.795301 | sparql.ex | starcoder |
defmodule Slip.Utils do
def to_route(path) do
String.split(String.strip(path, ?/), "/")
end
## Utilities to get parameters out of the request
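  # Illustrative example (assumes `:parameters` was stashed in the process
  # dictionary by the surrounding request handling):
  #
  #     Process.put(:parameters, [port: "8080", mode: "fast"])
  #     get_parameters(port: :integer, mode: {:atom, [:fast, :slow]})
  #     #=> %{port: 8080, mode: :fast}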
def get_parameters(req_params) do
parameters = Process.get(:parameters, [])
Enum.reduce(req_params, %{}, fn(requirement = {name, _}, map) ->
result = get_parameter(requirement, parameters)
Map.put(map, name, result)
end)
end
defp get_parameter({name, {:atom, possible_atoms, default}}, parameters) do
case parameters[name] do
nil -> default
value ->
atom_value = try do
case value do
value when is_list(value) -> List.to_existing_atom(value)
value when is_binary(value) -> String.to_existing_atom(value)
value -> value
end
rescue
ArgumentError -> throw({:error, {:invalid_parameter, name, possible_atoms}})
end
case Enum.member?(possible_atoms, atom_value) do
true -> atom_value
false -> throw({:error, {:invalid_parameter, name, possible_atoms}})
end
end
end
defp get_parameter({name, {:atom, possible_atoms}}, parameters) do
case get_parameter({name, {:atom, possible_atoms, "undefined"}}, parameters) do
"undefined" -> throw({:error, {:invalid_parameter, name, possible_atoms}})
v -> v
end
end
defp get_parameter({name, {type, default}}, parameters) do
value = case parameters[name] do
nil -> default
v -> v
end
convert(value, type)
end
defp get_parameter({name, type}, parameters) do
case get_parameter({name, {type, :undefined}}, parameters) do
:undefined -> throw({:error, {:missing_parameter, name}})
v -> v
end
end
# Integers
defp convert(value, :integer) when is_list(value), do: List.to_integer(value)
defp convert(value, :integer) when is_binary(value), do: String.to_integer(value)
defp convert(value, :integer), do: value
# Float
defp convert(value, :float) when is_list(value), do: List.to_float(value)
defp convert(value, :float) when is_binary(value), do: String.to_float(value)
defp convert(value, :float), do: value
# Binary
defp convert(value, :binary) when is_list(value), do: List.to_string(value)
defp convert(value, :binary) when is_number(value), do: Integer.to_string(value)
defp convert(value, :binary), do: value
end | lib/slip_utils.ex | 0.57093 | 0.420272 | slip_utils.ex | starcoder |
defmodule Tap do
@default [formatter: &__MODULE__.format/1]
@doc ~S"""
Traces calls, return values and exceptions from the function call template
given as an argument.
## Examples
Trace calls to `&String.starts_with?/2` and print the first two events:
iex> Tap.call(String.starts_with?(_, _), 2)
1
Trace calls to `&String.starts_with?/2` when the second argument is
`"b"` and print the first event:
iex> Tap.call(String.starts_with?(_, "b"), 1)
1
"""
defmacro call(mfa, opts) do
{{:., _, [module, function]}, _, args} = mfa
args = Enum.map(args, fn {:_, _, nil} -> :_; arg -> arg end)
quote do
Tap.calls(
[{
unquote(module),
unquote(function),
[{unquote(args), [], [{:exception_trace}]}]
}],
unquote(opts)
)
end
end
@doc ~S"""
Traces on the function patterns given as an argument.
## Examples
Trace calls (but not return values) to `&String.strip` with any number of
arguments and print the first ten events:
iex> Tap.calls([{String, :strip, :_}], max: 10)
2
Trace calls and return values from `&String.strip/2` and print the first ten
events:
iex> Tap.calls([{String, :strip, {2, :return}}], max: 10)
2
"""
def calls(tspecs, opts) when is_integer(opts), do: calls(tspecs, max: opts)
def calls(tspecs, opts) do
max = Keyword.get(opts, :max, 2)
opts = Keyword.merge(@default, Keyword.drop(opts, [:max]))
:recon_trace.calls(expand(tspecs), max, opts)
end
def format(event) do
{type, info, meta} = extract(event)
case {type, info} do
## {:trace, pid, :receive, msg}
{:receive, [msg]} ->
format(meta, "< #{inspect(msg, pretty: true)}")
## {trace, Pid, send, Msg, To}
# {send, [Msg, To]} ->
# {" > ~p: ~p", [To, Msg]};
## {trace, Pid, send_to_non_existing_process, Msg, To}
# {send_to_non_existing_process, [Msg, To]} ->
# {" > (non_existent) ~p: ~p", [To, Msg]};
## {trace, Pid, call, {M, F, Args}}
{:call, [{m, f, a}]} ->
format(meta, Exception.format_mfa(m, f, a))
## {trace, Pid, return_to, {M, F, Arity}}
# {return_to, [{M,F,Arity}]} ->
# {"~p:~p/~p", [M,F,Arity]};
## {trace, Pid, return_from, {M, F, Arity}, ReturnValue}
{:return_from, [{m, f, a}, return]} ->
format(meta, [Exception.format_mfa(m, f, a), " --> ", inspect(return, pretty: true)])
## {trace, Pid, exception_from, {M, F, Arity}, {Class, Value}}
{:exception_from, [{m, f, a}, {class, reason}]} ->
format(meta, [Exception.format_mfa(m, f, a), ?\s, Exception.format(class, reason)])
# {"~p:~p/~p ~p ~p", [M,F,Arity, Class, Val]};
## {trace, Pid, spawn, Spawned, {M, F, Args}}
# {spawn, [Spawned, {M,F,Args}]} ->
# {"spawned ~p as ~p:~p~s", [Spawned, M, F, format_args(Args)]};
## {trace, Pid, exit, Reason}
# {exit, [Reason]} ->
# {"EXIT ~p", [Reason]};
## {trace, Pid, link, Pid2}
# {link, [Linked]} ->
# {"link(~p)", [Linked]};
## {trace, Pid, unlink, Pid2}
# {unlink, [Linked]} ->
# {"unlink(~p)", [Linked]};
## {trace, Pid, getting_linked, Pid2}
# {getting_linked, [Linker]} ->
# {"getting linked by ~p", [Linker]};
## {trace, Pid, getting_unlinked, Pid2}
# {getting_unlinked, [Unlinker]} ->
# {"getting unlinked by ~p", [Unlinker]};
## {trace, Pid, register, RegName}
# {register, [Name]} ->
# {"registered as ~p", [Name]};
## {trace, Pid, unregister, RegName}
# {unregister, [Name]} ->
# {"no longer registered as ~p", [Name]};
## {trace, Pid, in, {M, F, Arity} | 0}
# {in, [{M,F,Arity}]} ->
# {"scheduled in for ~p:~p/~p", [M,F,Arity]};
# {in, [0]} ->
# {"scheduled in", []};
## {trace, Pid, out, {M, F, Arity} | 0}
# {out, [{M,F,Arity}]} ->
# {"scheduled out from ~p:~p/~p", [M, F, Arity]};
# {out, [0]} ->
# {"scheduled out", []};
## {trace, Pid, gc_start, Info}
# {gc_start, [Info]} ->
# HeapSize = proplists:get_value(heap_size, Info),
# {"gc beginning -- heap ~p bytes", [HeapSize]};
## {trace, Pid, gc_end, Info}
# {gc_end, [Info]} ->
# [Info] = TraceInfo,
# HeapSize = proplists:get_value(heap_size, Info),
# OldHeapSize = proplists:get_value(old_heap_size, Info),
# {"gc finished -- heap ~p bytes (recovered ~p bytes)",
# [HeapSize, OldHeapSize-HeapSize]};
# _ ->
# {"unknown trace type ~p -- ~p", [Type, TraceInfo]}
_ ->
:recon_trace.format(event)
end
end
@doc ~S"""
Formatting the output
## Examples
iex(1)> Tap.format({{1.0,1.0,1.0},""},"test")
"1.0:1.0:1.000000 \"\" test\n\n"
"""
def format({{hour, min, sec}, pid}, message) do
"#{hour}:#{min}:#{:erlang.float_to_binary(sec, decimals: 6)} #{inspect pid} #{message}\n\n"
end
defp expand(specs), do: for(s <- specs, do: spec(s))
defp spec({m, f, p}), do: {m, f, pattern(p)}
defp spec(s), do: s
defp pattern({arity, :return}) do
[{for(_ <- 1..arity, do: :_), [], [{:exception_trace}]}]
end
defp pattern({arity, :r}), do: pattern({arity, :return})
defp pattern(:return), do: [{:_, [], [{:exception_trace}]}]
defp pattern(:r), do: pattern(:return)
defp pattern(p), do: p
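  # Trace events arrive either as {:trace, pid, type, ...} or, when timestamps are
  # enabled, as {:trace_ts, pid, type, ..., timestamp}; normalize both shapes into
  # {type, meta, {local_time, pid}}.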
defp extract(event) do
case Tuple.to_list(event) do
[:trace_ts, pid, type | rest] ->
{meta, [stamp]} = Enum.split(rest, length(rest) - 1)
{type, meta, {time(stamp), pid}}
[:trace, pid, type | meta] ->
{type, meta, {time(:os.timestamp), pid}}
end
end
defp time({_, _, micro} = stamp) do
{_, {h, m, s}} = :calendar.now_to_local_time(stamp)
{h, m, s + micro / 1000000}
end
end | lib/tap.ex | 0.66236 | 0.549399 | tap.ex | starcoder |
defmodule Day8 do
@moduledoc """
Two factor authentication screen emulator
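
  Example invocation (the command string follows the puzzle input format):

      Day8.compute(3, 7, "rect 3x2\\nrotate column x=1 by 1\\nrotate row y=0 by 4")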
"""
@default_width 50
@default_height 6
def count_pixels_from_file(path), do: count_pixels_from_file(@default_height, @default_width, path)
def count_pixels_from_file(rows, columns, path) do
compute(rows, columns, File.read!(path))
|> List.flatten
|> Enum.sum
end
@doc """
Display part of the board
"""
def display(board, start_col, count) do
board
|> Enum.map(&(Enum.slice(&1, start_col, count)))
|> Enum.map(&(Enum.map(&1, fn x -> if 1 == x, do: nil, else: " " end)))
|> Enum.each(&IO.inspect/1)
IO.puts "\n\n\n"
end
@doc """
Run the given list of commands on an empty board
"""
def compute(commands), do: compute(@default_height, @default_width, commands)
def compute(rows, columns, commands) do
row = List.duplicate(0, columns)
board = List.duplicate(row, rows)
commands = commands |> String.trim |> String.split("\n")
execute_commands(board, commands)
end
def execute_commands(board, []), do: board
def execute_commands(board, [cmd | rest]) do
cmd = parse_cmd(cmd)
board = execute_command(board, cmd)
execute_commands(board, rest)
end
def parse_cmd("rect " <> str) do
[cols, rows] = String.trim(str) |> String.split("x") |> Enum.map(&String.to_integer/1)
{:rect, cols, rows}
end
def parse_cmd("rotate row y="<> str) do
[row, count] = String.split(str, " by ") |> Enum.map(&String.to_integer/1)
{:rot_row, row, count}
end
def parse_cmd("rotate column x=" <> str) do
[col, count] = String.split(str, " by ") |> Enum.map(&String.to_integer/1)
{:rot_col, col, count}
end
def execute_command(board, {:rect, cols, rows}) do
IO.puts "rect: #{inspect cols}, #{inspect rows}"
fill_rows(board, cols, rows-1)
end
def execute_command(board, {:rot_row, row, count}) do
IO.puts "rotate row: #{inspect row}, #{inspect count}"
List.update_at(board, row, &(shift_row(&1, count)))
end
def execute_command(board, {:rot_col, col, count}) do
IO.puts "rotate col: #{inspect col}, #{inspect count}"
col_vals = Enum.map(board, &(Enum.at(&1, col))) |> shift_row(count)
zipped = List.zip([board, col_vals])
Enum.map(zipped, &(update_col(&1, col)))
end
defp update_col({list, val}, col), do: List.replace_at(list, col, val)
defp shift_row(row, cnt) when cnt <= 0, do: row
defp shift_row(row, cnt) do
{val, list} = List.pop_at(row, -1)
shift_row([val | list], cnt-1)
end
defp fill_rows(board, _cols, idx) when idx < 0, do: board
defp fill_rows(board, cols, idx) do
board = List.update_at(board, idx, &(fill_row(&1, cols-1)))
fill_rows(board, cols, idx-1)
end
defp fill_row(list, idx) when idx < 0, do: list
defp fill_row(list, idx) do
List.replace_at(list, idx, 1)
|> fill_row(idx-1)
end
end | day8/lib/day8.ex | 0.727007 | 0.499329 | day8.ex | starcoder |
defmodule Mix.Releases.Config.Provider do
@moduledoc """
This defines the behaviour for custom configuration providers.
Keys supplied to providers are a list of atoms which represent the path of the
configuration key, beginning with the application name:
> MyProvider.get([:myapp, :server, :port])
{:ok, 8080}
> MyProvider.get([:myapp, :invalid, :key])
nil
The `init/1` function is called during startup, typically before any applications are running,
with the exception of `:kernel`, `:stdlib`, `:elixir`, and `:compiler`. All application code will
be loaded, but you may want to be explicit about it to ensure that this is true even in a relaxed
code loading environment.
The `init/1` function will receive the arguments it was provided in its definition.
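
  For example, a provider reading keys from environment variables might look like
  this (illustrative sketch; the variable naming scheme is an assumption):

      defmodule MyApp.EnvProvider do
        use Mix.Releases.Config.Provider

        def init(_args), do: :ok

        def get(keypath) do
          # e.g. [:myapp, :server, :port] is looked up as "MYAPP_SERVER_PORT"
          env_var = keypath |> Enum.map(&to_string/1) |> Enum.join("_") |> String.upcase()
          case System.get_env(env_var) do
            nil -> nil
            value -> {:ok, value}
          end
        end
      end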
"""
defmacro __using__(_) do
quote do
@behaviour unquote(__MODULE__)
end
end
@doc """
Called when the provider is initialized
"""
@callback init(args :: [term]) :: :ok | no_return
@doc """
Called when the provider is being asked to supply a value for the given key
"""
@callback get(key :: [atom]) :: {:ok, term} | nil
@doc false
def init(providers) when is_list(providers) do
# If called later, reset the table
case :ets.info(__MODULE__, :size) do
:undefined ->
:ets.new(__MODULE__, [:public, :set, :named_table])
_ ->
:ets.delete_all_objects(__MODULE__)
end
for provider <- providers do
case provider do
p when is_atom(p) ->
:ets.insert(__MODULE__, {p, []})
p.init([])
{p, args} ->
:ets.insert(__MODULE__, provider)
p.init(args)
p ->
raise ArgumentError,
message:
"Invalid #{__MODULE__}: Expected module or `{module, args}` tuple, got #{inspect(p)}"
end
end
end
@doc false
def get(key) do
get(:ets.tab2list(__MODULE__), key)
end
defp get([], _key), do: nil
defp get([provider | providers], key) do
case provider.get(key) do
nil ->
get(providers, key)
{:ok, _} = val ->
val
end
end
end | lib/mix/lib/releases/config/provider.ex | 0.737631 | 0.443841 | provider.ex | starcoder |
defmodule Grizzly.ZWave.Commands.FailedNodeListReport do
@moduledoc """
This command is used to advertise the current list of failing nodes in the network.
Params:
* `:seq_number` - Sequence number
* `:node_ids` - The ids of all nodes in the network found to be unresponsive
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave
alias Grizzly.ZWave.{Command, DecodeError}
alias Grizzly.ZWave.CommandClasses.NetworkManagementProxy
@type param :: {:node_ids, [ZWave.node_id()]} | {:seq_number, ZWave.seq_number()}
@impl true
def new(params) do
command = %Command{
name: :failed_node_list_report,
command_byte: 0x0C,
command_class: NetworkManagementProxy,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
def encode_params(command) do
seq_number = Command.param!(command, :seq_number)
node_ids = Command.param!(command, :node_ids)
node_id_bytes = node_ids_to_bytes(node_ids)
<<seq_number>> <> node_id_bytes
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]} | {:error, DecodeError.t()}
def decode_params(<<seq_number, node_id_bytes::binary>>) do
node_ids = node_ids_from_bytes(node_id_bytes)
{:ok, [seq_number: seq_number, node_ids: node_ids]}
end
defp node_ids_to_bytes(node_ids) do
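    # 29 bytes x 8 bits cover the 232 possible Z-Wave node ids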
bytes =
for byte_index <- 0..28 do
for bit_index <- 8..1, into: <<>> do
node_id = byte_index * 8 + bit_index
if node_id in node_ids, do: <<1::size(1)>>, else: <<0::size(1)>>
end
end
for byte <- bytes, into: <<>>, do: byte
end
defp node_ids_from_bytes(binary) do
:erlang.binary_to_list(binary)
|> Enum.with_index()
|> Enum.reduce(
[],
fn {byte, byte_index}, acc ->
bit_list = for <<(bit::size(1) <- <<byte>>)>>, do: bit
id_or_nil_list =
for bit_index <- 0..7 do
bit = Enum.at(bit_list, 7 - bit_index)
if bit == 1, do: byte_index * 8 + bit_index + 1, else: nil
end
acc ++ Enum.reject(id_or_nil_list, &(&1 == nil))
end
)
end
end
# Source: lib/grizzly/zwave/commands/failed_node_list_report.ex
defmodule Specify.Parsers do
@moduledoc """
Simple functions to parse strings to datatypes commonly used during configuration.
These functions can be used as parser/validator function in a call to `Specify.Schema.field`,
by using their shorthand name (`:integer` as shorthand for `&Specify.Parsers.integer/1`).
(Of course, using their longhand name works as well.)
## Defining your own parser function
A parser function receives the to-be-parsed/validated value as input,
and should return `{:ok, parsed_val}` on success,
or `{:error, reason}` on failure.
Be aware that depending on where the configuration is loaded from,
the to-be-parsed value might be a binary string,
or already the Elixir type you want to convert it to.
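
  For example, a hypothetical parser that only accepts valid TCP ports could
  build on the parsers in this module:

      def port(value) do
        with {:ok, int} <- Specify.Parsers.integer(value) do
          if int in 0..65535,
            do: {:ok, int},
            else: {:error, "#{int} is not a valid port number."}
        end
      end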
"""
@doc """
Parses an integer and turns binary string representing an integer into an integer.
"""
def integer(int) when is_integer(int), do: {:ok, int}
def integer(binary) when is_binary(binary) do
case Integer.parse(binary) do
{int, ""} -> {:ok, int}
{_int, _rest} -> {:error, "the binary `#{binary}` cannot be parsed to an integer."}
:error -> {:error, "the binary `#{binary}` cannot be parsed to an integer."}
end
end
def integer(other), do: {:error, "#{inspect(other)} is not an integer."}
@doc """
Similar to integer/1, but only accepts integers larger than 0.
"""
def positive_integer(val) do
with {:ok, int} <- integer(val) do
if int > 0 do
{:ok, int}
else
{:error, "integer #{int} is not a positive integer."}
end
end
end
@doc """
Similar to integer/1, but only accepts integers larger than or equal to 0.
"""
def nonnegative_integer(val) do
with {:ok, int} <- integer(val) do
if int >= 0 do
{:ok, int}
else
{:error, "integer #{int} is not a nonnegative integer."}
end
end
end
@doc """
Parses a float and turns a binary string representing a float into an float.
Will also accept integers, which are turned into their float equivalent.
"""
def float(float) when is_float(float), do: {:ok, float}
def float(int) when is_integer(int), do: {:ok, 1.0 * int}
def float(binary) when is_binary(binary) do
case Float.parse(binary) do
{float, ""} -> {:ok, float}
      {_float, _rest} -> {:error, "the binary `#{binary}` cannot be parsed to a float."}
      :error -> {:error, "the binary `#{binary}` cannot be parsed to a float."}
end
end
def float(other), do: {:error, "`#{inspect(other)}` is not a float"}
@doc """
Similar to float/1, but only accepts floats larger than 0.
"""
def positive_float(val) do
with {:ok, float} <- float(val) do
if float > 0 do
{:ok, float}
else
{:error, "float #{float} is not a positive float."}
end
end
end
@doc """
Similar to float/1, but only accepts floats larger than or equal to 0.
"""
def nonnegative_float(val) do
with {:ok, float} <- float(val) do
if float >= 0 do
{:ok, float}
else
{:error, "float #{float} is not a nonnegative float."}
end
end
end
@doc """
Parses a binary string and turns anything that implements `String.Chars` into its binary string representation by calling `to_string/1` on it.
"""
def string(binary) when is_binary(binary), do: {:ok, binary}
def string(thing) do
try do
{:ok, to_string(thing)}
rescue
ArgumentError ->
{:error,
"`#{inspect(thing)}` cannot be converted to string because it does not implement the String.Chars protocol."}
end
end
@doc """
Accepts any Elixir term as-is. Will not do any parsing.
Only use this as a last resort. It is usually better to create your own dedicated parsing function instead.
"""
def term(anything), do: {:ok, anything}
@doc """
Parses a boolean or a binary string representing a boolean value, turning it into a boolean.
"""
def boolean(boolean) when is_boolean(boolean), do: {:ok, boolean}
def boolean(binary) when is_binary(binary) do
case binary |> Macro.underscore() do
"true" -> {:ok, true}
"false" -> {:ok, false}
_ -> {:error, "`#{binary}` cannot be parsed to a boolean."}
end
end
def boolean(other), do: {:error, "`#{inspect(other)}` is not a boolean."}
@doc """
Parses an atom or a binary string representing an (existing) atom.
Will not create new atoms (See `String.to_existing_atom/1` for more info).
"""
def atom(atom) when is_atom(atom), do: {:ok, atom}
def atom(binary) when is_binary(binary) do
try do
{:ok, String.to_existing_atom(binary)}
rescue
ArgumentError ->
{:error, "`#{binary}` is not an existing atom."}
end
end
def atom(other), do: {:error, "`#{inspect(other)}` is not an (existing) atom."}
@doc """
  Parses an atom or a binary string representing a (potentially not yet existing!) atom.
Will create new atoms. Whenever possible, consider using `atom/1` instead.
(See `String.to_atom/1` for more info on why creating new atoms is usually a bad idea).
"""
def unsafe_atom(atom) when is_atom(atom), do: {:ok, atom}
def unsafe_atom(binary) when is_binary(binary) do
{:ok, String.to_atom(binary)}
end
def unsafe_atom(other), do: {:error, "`#{inspect(other)}` is not convertible to an atom."}
@doc """
Parses a list of elements.
In the case a binary string was passed, this parser uses `Code.string_to_quoted` under the hood to check for Elixir syntax, and will only accepts binaries representing lists.
If a list was passed in (or after turning a binary into a list), it will try to parse each of the elements in turn.
"""
def list(list, elem_parser) when is_list(list) do
res_list =
Enum.reduce_while(list, [], fn
elem, acc ->
case elem_parser.(elem) do
{:ok, res} ->
{:cont, [res | acc]}
{:error, reason} ->
{:halt,
{:error,
"One of the elements of input list `#{inspect(list)}` failed to parse: \n#{reason}."}}
end
end)
case res_list do
{:error, reason} ->
{:error, reason}
parsed_list when is_list(parsed_list) ->
{:ok, Enum.reverse(parsed_list)}
end
end
def list(binary, elem_parser) when is_binary(binary) do
case string_to_term(binary) do
{:ok, list_ast} when is_list(list_ast) ->
list_ast
|> Enum.map(&Macro.expand(&1, __ENV__))
|> list(elem_parser)
{:ok, _not_a_list} ->
{:error,
"`#{inspect(binary)}`, while parseable as Elixir code, does not represent an Elixir list."}
{:error, reason} ->
{:error, reason}
end
end
def list(term, _) do
{:error, "`#{inspect(term)}` does not represent an Elixir list."}
end
@doc """
Allows to pass in a 'timeout' which is a common setting for OTP-related features,
accepting either a positive integer, or the atom `:infinity`.
"""
def timeout(raw) do
case positive_integer(raw) do
{:ok, int} ->
{:ok, int}
{:error, _} ->
case atom(raw) do
{:ok, :infinity} ->
{:ok, :infinity}
{:ok, _} ->
{:error,
"#{inspect(raw)} is neither a positive integer nor the special atom value `:infinity`"}
{:error, _} ->
{:error,
"`#{inspect(raw)}` is neither a positive integer nor the special atom value `:infinity`"}
end
end
end
@doc """
Parses a Module-Function-Arity tuple.
  Accepts it either as an Elixir three-element tuple (where the first two elements are atoms, and the third is a nonnegative integer), or as a string representation of the same.
Will also check and ensure that this function is actually defined.
"""
def mfa(raw) when is_binary(raw) do
case string_to_term(raw) do
{:ok, {module, function, arity}}
when is_atom(module) and is_atom(function) and is_integer(arity) ->
mfa({module, function, arity})
{:ok, _other} ->
{:error, "`#{inspect(raw)}`, while parseable as Elixir code, does not represent a Module-Function-Arity tuple."}
{:error, reason} ->
{:error, reason}
end
end
def mfa(mfa = {module, function, arity}) when is_atom(module) and is_atom(function) and is_integer(arity) and arity >= 0 do
if function_exported?(module, function, arity) do
{:ok, mfa}
else
{:error, "function #{module}.#{function}/#{arity} does not exist."}
end
end
def mfa(other_val) do
{:error, "`#{inspect(other_val)}` is not a Module-Function-Arity tuple"}
end
def unquote_atom(atom) when is_atom(atom) do
{:ok, atom}
end
def unquote_atom(aliased_atom = {:__aliases__, _, [atom]}) when is_atom(atom) do
case Code.eval_quoted(aliased_atom) do
{result, []} ->
{:ok, result}
other ->
{:error, "`#{inspect(other)}` cannot be unquoted as an atom."}
end
end
def unquote_atom(other) do
{:error, "`#{inspect(other)}` cannot be unquoted as an atom."}
end
@doc """
Parses a function.
This can be a function capture, or a MFA (Module-Function-Arity) tuple, which will
be transformed into the `&Module.function/arity` capture.
(So in either case, you end up with a function value
that you can call using the dot operator, i.e. `.()` or `.(maybe, some, args)`).
## String Contexts
For contexts in which values are specified as strings, the parser only supports the MFA format.
This is for security (and ease of parsing) reasons.
"""
def function(raw) when is_binary(raw) or is_tuple(raw) do
with {:ok, {module, function, arity}} <- mfa(raw),
{fun, []} <- Code.eval_quoted(quote do &unquote(module).unquote(function)/unquote(arity) end) do
{:ok, fun}
end
end
def function(fun) when is_function(fun) do
{:ok, fun}
end
def function(other) do
{:error, "`#{other}` cannot be parsed as a function."}
end
@doc """
Parses an option.
An option is a 2-tuple whose first element is an atom, and the second an arbitrary term.
The following terms are options:
- `{:a, :b}`
- `{MyApp.Module, "Hellow, world!"}`
- `{:some_atom, %{[] => {1, 2, 3, 4, 5}}}`
In the case a binary string was passed, this parser uses `Code.string_to_quoted` under the
hood to parse the terms.
  It can be conveniently used alongside the list parser to check for a keyword list:
`{:list, :option}`.
"""
def option(raw) when is_binary(raw) do
case string_to_term(raw, existing_atoms_only: true) do
{:ok, term} when not is_binary(term) ->
option(term)
{:ok, term} ->
{:error, "the term `#{inspect(term)}` cannot be parsed to an option."}
{:error, _} = error ->
error
end
end
def option({key, value}) when is_atom(key) do
{:ok, {key, value}}
end
def option(term) do
{:error, "the term `#{inspect(term)}` cannot be parsed to an option."}
end
defp string_to_term(binary, opts \\ [existing_atoms_only: true]) when is_binary(binary) do
case Code.string_to_quoted(binary, opts) do
{:ok, ast} ->
{:ok, ast_to_term(ast)}
{:error, _} = error ->
error
end
rescue
e ->
{:error, e}
end
defp ast_to_term(term) when is_atom(term), do: term
defp ast_to_term(term) when is_integer(term), do: term
defp ast_to_term(term) when is_float(term), do: term
defp ast_to_term(term) when is_binary(term), do: term
defp ast_to_term([]), do: []
defp ast_to_term([h | t]), do: [ast_to_term(h) | ast_to_term(t)]
defp ast_to_term({a, b}), do: {ast_to_term(a), ast_to_term(b)}
defp ast_to_term({:{}, _place, terms}),
do: terms |> Enum.map(&ast_to_term/1) |> List.to_tuple()
defp ast_to_term({:%{}, _place, terms}),
do: for {k, v} <- terms, into: %{}, do: {ast_to_term(k), ast_to_term(v)}
defp ast_to_term(aliased = {:__aliases__, _, _}), do: Macro.expand(aliased, __ENV__)
defp ast_to_term({:+, _, [number]}), do: number
defp ast_to_term({:-, _, [number]}), do: -number
defp ast_to_term(ast), do: raise ArgumentError, message: "invalid term `#{inspect(ast)}`"
end
# Source: lib/specify/parsers.ex
defmodule Onion.Common.DataValidator do
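  @moduledoc """
  Coerces and validates the entries of a map or keyword list against
  `{key, type}` specs. Mandatory keys must be present and convertible;
  optional keys may be given as `{key, {type, default}}`.

  Illustrative usage (the key names are made up):

      DataValidator.validate(%{"port" => "8080"}, [port: :integer], [debug: {:bool, false}])
      #=> {:ok, %{"port" => 8080, "debug" => false}}
  """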
defp to_atom(value) when is_atom(value), do: {:ok, value }
defp to_atom(value) when is_binary(value), do: {:ok, String.to_atom(value) }
defp to_atom(value), do: {:error, value}
defp to_existing_atom(value) when is_atom(value), do: {:ok, value }
defp to_existing_atom(value) when is_binary(value), do: {:ok, String.to_existing_atom(value) }
defp to_existing_atom(value), do: {:error, value}
defp to_binary(value) when is_atom(value), do: {:ok, Atom.to_string(value) }
defp to_binary(value) when is_binary(value), do: {:ok, value }
defp to_binary(value) when is_integer(value), do: {:ok, Integer.to_string(value) }
defp to_binary(value) when is_float(value), do: {:ok, Float.to_string(value) }
defp to_binary(value), do: {:error, value}
defp to_integer(value) when is_integer(value), do: {:ok, value }
defp to_integer(value) when is_float(value), do: {:ok, trunc(value) }
defp to_integer(value) when is_binary(value) do
case Integer.parse(value) do
:error -> {:error, value}
{val, _} -> {:ok, val}
end
end
defp to_integer(value), do: {:error, value}
defp to_bool(0), do: {:ok, false}
defp to_bool(""), do: {:ok, false}
defp to_bool("no"), do: {:ok, false}
defp to_bool("false"), do: {:ok, false}
defp to_bool(false), do: {:ok, false}
defp to_bool(:no), do: {:ok, false}
defp to_bool(value) when is_binary(value), do: {:ok, true}
defp to_bool(value) when is_atom(value), do: {:ok, true}
  defp to_bool(value) when is_integer(value), do: {:ok, true}
  # Catch-all so non-coercible values (floats, lists, maps, ...) error out
  # like the other converters instead of raising a FunctionClauseError
  defp to_bool(value), do: {:error, value}
defp to_float(value) when is_float(value), do: {:ok, value }
defp to_float(value) when is_integer(value), do: {:ok, value * 1.0 }
defp to_float(value) when is_binary(value) do
case Float.parse(value) do
:error -> {:error, value}
{val, _} -> {:ok, val}
end
end
defp to_float(value), do: {:error, value}
defp to_list(value) when is_list(value), do: {:ok, value}
defp to_list(value) when is_integer(value) or is_float(value) or is_atom(value), do: {:ok, [value]}
defp to_list(value) when is_binary(value), do: {:ok, String.split(value, ",") }
defp to_list(value), do: {:error, value}
defp to_map(value) when is_map(value), do: {:ok, value}
defp to_map(value), do: {:error, value}
defp to_any(value), do: {:ok, value}
defp to_int_list(a={:error, _}), do: a
defp to_int_list({:ok, val}), do: to_int_list(val)
defp to_int_list(value) when is_list(value) do
    case Enum.reduce(value, {:ok, []},
fn(item, {:ok, array}) ->
case to_integer(item) do
{:ok, new_item} -> {:ok, [new_item|array]}
{:error, _} -> :error
end;
(_, :error) -> :error
end) do
:error -> {:error, value}
{:ok, val} -> {:ok, Enum.reverse(val)}
end
end
defp to_bin_list(a={:error, _}), do: a
defp to_bin_list({:ok, val}), do: to_bin_list(val)
defp to_bin_list(value) when is_list(value) do
    case Enum.reduce(value, {:ok, []},
fn(item, {:ok, array}) ->
case to_binary(item) do
{:ok, new_item} -> {:ok, [new_item|array]}
{:error, _} -> :error
end;
(_, :error) -> :error
end) do
:error -> {:error, value}
{:ok, val} -> {:ok, Enum.reverse(val) }
end
end
defp to_atom_list(a={:error, _}), do: a
defp to_atom_list({:ok, val}), do: to_atom_list(val)
defp to_atom_list(value) when is_list(value) do
    case Enum.reduce(value, {:ok, []},
fn(item, {:ok, array}) ->
case to_atom(item) do
{:ok, new_item} -> {:ok, [new_item|array]}
{:error, _} -> :error
end;
(_, :error) -> :error
end) do
:error -> {:error, value}
{:ok, val} -> {:ok, Enum.reverse(val) }
end
end
defp to_existing_atom_list(a={:error, _}), do: a
defp to_existing_atom_list({:ok, val}), do: to_existing_atom_list(val)
defp to_existing_atom_list(value) when is_list(value) do
    case Enum.reduce(value, {:ok, []},
fn(item, {:ok, array}) ->
case to_existing_atom(item) do
{:ok, new_item} -> {:ok, [new_item|array]}
{:error, _} -> :error
end;
(_, :error) -> :error
end) do
:error -> {:error, value}
{:ok, val} -> {:ok, Enum.reverse(val) }
end
end
defp to_float_list(a={:error, _}), do: a
defp to_float_list({:ok, val}), do: to_float_list(val)
defp to_float_list(value) when is_list(value) do
    case Enum.reduce(value, {:ok, []},
fn(item, {:ok, array}) ->
case to_float(item) do
{:ok, new_item} -> {:ok, [new_item|array]}
{:error, _} -> :error
end;
(_, :error) -> :error
end) do
:error -> {:error, value}
{:ok, val} -> {:ok, Enum.reverse(val) }
end
end
defp process(value, :atom), do: to_atom(value)
defp process(value, :exatom), do: to_existing_atom(value)
defp process(value, :existing_atom), do: to_existing_atom(value)
defp process(value, :map), do: to_map(value)
defp process(value, :bool), do: to_bool(value)
defp process(value, :boolean), do: to_bool(value)
defp process(value, :float), do: to_float(value)
defp process(value, :timestamp), do: to_integer(value)
defp process(value, :integer), do: to_integer(value)
defp process(value, :int), do: to_integer(value)
defp process(value, :binary), do: to_binary(value)
defp process(value, :bin), do: to_binary(value)
defp process(value, :string), do: to_binary(value)
defp process(value, :str), do: to_binary(value)
defp process(value, :list), do: to_list(value)
defp process(value, :timestamp_list), do: to_list(value) |> to_int_list
defp process(value, :integer_list), do: to_list(value) |> to_int_list
defp process(value, :int_list), do: to_list(value) |> to_int_list
defp process(value, :float_list), do: to_list(value) |> to_float_list
defp process(value, :bin_list), do: to_list(value) |> to_bin_list
defp process(value, :str_list), do: to_list(value) |> to_bin_list
defp process(value, :string_list), do: to_list(value) |> to_bin_list
defp process(value, :binary_list), do: to_list(value) |> to_bin_list
defp process(value, :atom_list), do: to_list(value) |> to_atom_list
defp process(value, :exatom_list), do: to_list(value) |> to_existing_atom_list
defp process(value, :existing_atom_list), do: to_list(value) |> to_existing_atom_list
defp process(value, :any_list), do: to_list(value)
defp process(value, :any), do: to_any(value)
defp process(value, _type), do: {:unprocess, value}
defp process_mandatory([], _, res), do: {:ok, res}
defp process_mandatory([{key, type}|tail], dict, res) do
case Dict.has_key?(dict, key) do
false -> :error
true ->
case process(dict[key], type) do
{:ok, new_value} -> process_mandatory(tail, dict, Dict.put(res, key, new_value))
_ -> :error
end
end
end
defp process_optional([], _, res), do: {:ok, res}
defp process_optional([{key, [{ type, default }] }|tail], dict, res), do: process_optional([{key, { type, default } }|tail], dict, res)
defp process_optional([{key, { type, default } }|tail], dict, res) do
case Dict.has_key?(dict, key) do
false -> process_optional(tail, dict, Dict.put(res, key, default))
true ->
case process(dict[key], type) do
{:ok, new_value} -> process_optional(tail, dict, Dict.put(res, key, new_value))
_ -> :error
end
end
end
defp process_optional([{key, type}|tail], dict, res) do
case Dict.has_key?(dict, key) do
false -> process_optional(tail, dict, res)
true ->
case process(dict[key], type) do
{:ok, new_value} -> process_optional(tail, dict, Dict.put(res, key, new_value))
_ -> :error
end
end
end
defp process_other([], res), do: res
defp process_other([{key, value}|dict], res) do
case Dict.has_key?(res, key) do
false -> process_other(dict, Dict.put(res, key, value))
true -> process_other(dict, res)
end
end
def validate(dict, mandatory, optional \\ [], strict \\ false) do
case process_mandatory(key_to_bin(mandatory), dict, %{}) do
{:ok, new_dict} ->
case process_optional(key_to_bin(optional), dict, new_dict) do
{:ok, new_dict2} ->
case strict || false do
false -> {:ok, process_other(dict |> Enum.into([]), new_dict2)}
true -> {:ok, new_dict2}
end
:error -> {:error, dict}
end
:error -> {:error, dict}
end
end
def key_to_bin(dict), do: dict |> Enum.map(fn({key, value}) when is_atom(key) -> {Atom.to_string(key), value}; (item) -> item end)
  def key_to_bin_dict(dict), do: key_to_bin(dict) |> Enum.into(%{})
end
# Source: lib/common/datavalidator.ex
defmodule VintageNetWiFi.WPSData do
@moduledoc """
Utilities for handling WPS data
"""
@typedoc """
A map containing WPS data
All keys are optional. Known keys use atoms. Unknown keys use their numeric
value and their value is left as a raw binary.
Known keys:
* `:credential` - a map of WiFi credentials (also WPS data)
  * `:mac_address` - a MAC address in string form (e.g., `"AA:BB:CC:DD:EE:FF"`)
  * `:network_key` - a passphrase or PSK
  * `:network_index` - the key index
  * `:ssid` - the network SSID
"""
@type t() :: %{
optional(:credential) => t(),
optional(:mac_address) => binary(),
optional(:network_key) => binary(),
          optional(:network_index) => non_neg_integer(),
          optional(:ssid) => binary(),
          optional(0..65535) => binary()
}
@doc """
Decode WPS data
The WPS data is expected to be in hex string form like what the
wpa_supplicant reports.
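
  For example, given a hand-built TLV (tag 0x1045 = SSID, length 4, value "Wifi"):

      iex> VintageNetWiFi.WPSData.decode("1045000457696669")
      {:ok, %{ssid: "Wifi"}}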
"""
@spec decode(binary) :: {:ok, t()} | :error
def decode(hex_string) when is_binary(hex_string) do
with {:ok, raw_bytes} <- Base.decode16(hex_string, case: :mixed) do
decode_all_tlv(raw_bytes, %{})
end
end
defp decode_all_tlv(<<>>, result), do: {:ok, result}
defp decode_all_tlv(<<tag::16, len::16, value::binary-size(len), rest::binary>>, result) do
with {t, v} <- decode_tlv(tag, value) do
decode_all_tlv(rest, Map.put(result, t, v))
end
end
defp decode_all_tlv(_unexpected, _result), do: :error
defp decode_tlv(0x100E, value) do
with {:ok, decoded} <- decode_all_tlv(value, %{}) do
{:credential, decoded}
end
end
defp decode_tlv(0x1045, value), do: {:ssid, value}
defp decode_tlv(0x1027, value), do: {:network_key, value}
defp decode_tlv(0x1020, <<value::binary-size(6)>>) do
mac =
value
|> Base.encode16()
|> String.codepoints()
|> Enum.chunk_every(2)
|> Enum.join(":")
{:mac_address, mac}
end
defp decode_tlv(0x1026, <<n>>), do: {:network_index, n}
defp decode_tlv(tag, value), do: {tag, value}
end
# Source: lib/vintage_net_wifi/wps_data.ex
defmodule Gentry.Worker do
@moduledoc """
The worker is responsible for actually running and coordinating the
retries of the task given in the form of a function called
`task_function`.
  The task is spawned and monitored by the worker using
`Task.Supervisor.async_nolink`
The number of retries and the backoff between retries are taken from
the configuration.
"""
use GenServer
require Logger
defmodule State do
@moduledoc """
- `retries_remaining` is counting down to 0 for retries
- `task_function` is the whole purpose: the task we're trying to run
- `runner_pid` is the process that requested the task to be run and will get the reply
- `task` is the spawned task that's executing `task_function`
"""
defstruct retries_remaining: nil, task_function: nil, runner_pid: nil, task: nil
end
def start_link(task_function, runner_pid) do
GenServer.start_link(__MODULE__, [task_function, runner_pid])
end
def init([task_function, runner_pid]) do
initial_state = %State{
retries_remaining: retries(),
task_function: task_function,
runner_pid: runner_pid
}
Logger.debug(
"Worker #{inspect(self())} is starting with inital state: #{inspect(initial_state)}"
)
send(self(), {:execute_function})
{:ok, initial_state}
end
## Internal
def handle_info({:execute_function}, state) do
spawn_task(state)
end
# Receive the result of the task
def handle_info({ref, :ok}, %{task: %{ref: task_ref}} = state) when ref == task_ref do
handle_success(:ok, state)
end
def handle_info({ref, {:ok, result}}, %{task: %{ref: task_ref}} = state) when ref == task_ref do
handle_success(result, state)
end
def handle_info({ref, error}, %{task: %{ref: task_ref}} = state) when ref == task_ref do
Logger.debug("Received error result from task: #{inspect(error)}")
handle_failure(state, error)
end
def handle_info({:DOWN, ref, :process, _pid, :normal}, %{task: %{ref: task_ref}} = state)
when ref == task_ref do
Logger.debug("Normal shutdown of #{inspect(ref)}")
{:noreply, state}
end
def handle_info({:DOWN, ref, :process, _pid, error}, %{task: %{ref: task_ref}} = state)
when ref == task_ref do
Logger.warn(
"Abnormal shutdown of #{inspect(ref)}, error: #{inspect(error)}, retries remaining: #{
state.retries_remaining
}"
)
handle_failure(state, error)
end
def handle_info(msg, state) do
# catch all
Logger.warn("Unexpected message: #{inspect(msg)} with state: #{inspect(state)}")
{:noreply, state}
end
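  @doc """
  Computes the exponential backoff delay before the next retry.

  With the default configuration (5 retries, 5_000 ms base backoff) this
  yields 5_000 for the first retry (`compute_delay(5)`) and 20_000 once two
  retries have been used up (`compute_delay(3)`).
  """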
def compute_delay(retries_remaining) do
(retry_backoff() * :math.pow(2, retries() - retries_remaining))
|> round
end
defp spawn_task(state) do
task = Task.Supervisor.async_nolink(:task_supervisor, state.task_function)
new_state =
state
|> Map.put(:task, task)
{:noreply, new_state}
end
def handle_success(result, state) do
Logger.debug("Received completion from task: #{inspect(result)}")
# Send the reply
send(state.runner_pid, {:gentry, self(), :ok, result})
{:stop, :normal, state}
end
defp handle_failure(%{retries_remaining: retries_remaining} = state, _error)
when retries_remaining > 0 do
send(state.runner_pid, {:gentry, self(), :retry, state.retries_remaining})
Logger.debug("Retrying with #{state.retries_remaining} retries remaining")
retry(state.retries_remaining)
{:noreply, %State{state | retries_remaining: state.retries_remaining - 1}, :infinity}
end
defp handle_failure(state, error) do
send(state.runner_pid, {:gentry, self(), :error, error})
{:stop, {:shutdown, :max_retries_exceeded}, state}
end
defp retry(retries_remaining) do
Logger.debug("Retrying after #{compute_delay(retries_remaining)}")
Process.send_after(self(), {:execute_function}, compute_delay(retries_remaining))
end
defp retries do
Application.get_env(:gentry, :retries, 5)
end
defp retry_backoff do
Application.get_env(:gentry, :retry_backoff, 5_000)
end
end
# Source: lib/gentry/worker.ex
defmodule Kashup.Store do
@moduledoc """
Internal API for the underlying storage mechanism.
`Kashup.Store` maps provided keys to a `pid()` whose process, if alive, contains the value
associated with the key.
Kashup ships with [mnesia](https://erlang.org/doc/man/mnesia.html) as the default storage.
You can read more about mnesia and Elixir [here](https://elixirschool.com/en/lessons/specifics/mnesia/).
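
  Illustrative flow (assuming `element_pid` is a live `Kashup.Element` process):

      :ok = Kashup.Store.put("users/42", element_pid)
      {:ok, ^element_pid} = Kashup.Store.get("users/42")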
"""
alias :mnesia, as: Mnesia
@doc """
Initialize the storage mechanism.
"""
def init() do
Mnesia.stop()
Mnesia.delete_schema([Node.self()])
Mnesia.start()
Disco.fetch_capabilities(Kashup)
|> List.delete(Node.self())
|> sync_db()
end
@doc """
Add a key/pid() entry.
"""
def put(key, pid) do
Mnesia.dirty_write({KeyToPid, key, pid})
end
@doc """
Get a pid() with a provided key.
"""
def get(key) do
with [{KeyToPid, _key, pid}] <- Mnesia.dirty_read({KeyToPid, key}),
true <- pid_alive?(pid)
do
{:ok, pid}
else
_ -> {:error, :not_found}
end
end
@doc """
Remove an entry based on its value, a pid().
  This function is called from a `Kashup.Element` instance, as a cleanup operation to be performed
  during its termination. A `Kashup.Element` does not have access to the client's provided key, so
it instead passes `self()` to this function, removing its reference from the store after a value
is removed.
"""
def delete(pid) do
case Mnesia.dirty_index_read(KeyToPid, pid, :pid) do
[record] -> Mnesia.dirty_delete_object(record)
_ -> :ok
end
end
defp sync_db([]) do
Mnesia.create_table(KeyToPid, [attributes: [:key, :pid]])
Mnesia.add_table_index(KeyToPid, :pid)
end
defp sync_db(kashup_nodes), do: add_kashup_nodes(kashup_nodes)
  # Fall back to creating fresh local tables when no peer could be joined
  defp add_kashup_nodes([]), do: sync_db([])
  defp add_kashup_nodes([node | tail]) do
case Mnesia.change_config(:extra_db_nodes, [node]) do
{:ok, [_node]} ->
Mnesia.add_table_copy(:schema, Node.self(), :ram_copies)
Mnesia.add_table_copy(KeyToPid, Node.self(), :ram_copies)
Mnesia.system_info(:tables)
|> Mnesia.wait_for_tables(5000)
_ -> add_kashup_nodes(tail)
end
end
defp pid_alive?(pid) when node(pid) == node() do
Process.alive?(pid)
end
defp pid_alive?(pid) do
member? = Enum.member?(Node.list(), node(pid))
alive? = Task.Supervisor.async({Kashup.TaskSupervisor, node(pid)}, Process, :alive?, [pid])
|> Task.await()
member? and alive?
end
end
# Source: lib/kashup/store.ex
defmodule Prog do
@moduledoc """
Documentation for `Prog`.
"""
@doc """
Day 7
"""
def solve do
{:ok, raw} = File.read("data/day_7")
# raw = "shiny gold bags contain 2 dark red bags.
# dark red bags contain 2 dark orange bags.
# dark orange bags contain 2 dark yellow bags.
# dark yellow bags contain 2 dark green bags.
# dark green bags contain 2 dark blue bags.
# dark blue bags contain 2 dark violet bags.
# dark violet bags contain no other bags.
# "
final = String.split(raw, "\n", trim: true)
|> Enum.reduce(Map.new(), fn bag, acc ->
{b, bs} = extract(bag)
Map.put(acc, b, bs)
end)
IO.inspect final
part_two = find_contains_count(final, "shiny gold")
IO.inspect part_two
    please = Enum.map(final, fn {bag, _contents} -> can_contain(bag, final, "shiny gold") end)
count = Enum.reduce(please, 0, fn entry, acc -> if entry, do: 1 + acc, else: acc end)
end
def extract(rule) do
[bag, bags_it_contains] = String.split(rule, "bags contain", trim: true)
bags_moar = String.split(bags_it_contains, ", ", trim: true)
    bags_free =
      Enum.map(bags_moar, fn b ->
        b
        |> String.trim()
        |> String.trim_trailing(".")
        |> String.trim_trailing(" bags")
        |> String.trim_trailing(" bag")
      end)
bags_final = Enum.reduce(bags_free, Map.new(), fn b, acc ->
[num | rem] = String.split(b, " ")
Map.put(acc, Enum.join(rem, " "), num)
end)
{String.trim(bag), bags_final}
end
def can_contain(bag, memo, query) do
current = Map.get(memo, bag)
cond do
current == %{"other" => "no"} ->
false
Map.has_key?(current, query) ->
true
true ->
results = Map.get(memo, bag)
Enum.any?(Map.keys(results), fn b -> can_contain(b, memo, query) end)
end
end
def find_contains_count(memo, query) do
current = Map.get(memo, query)
IO.inspect current
cond do
current == %{"other" => "no"} ->
0
true ->
Enum.reduce(current, 0, fn {k, v}, acc ->
cur = acc + (find_contains_count(memo, k) * String.to_integer(v)) + String.to_integer(v)
cur
end)
end
end
end
Prog.solve
# Source: lib/days/day_7.ex
defmodule Grid do
@type grid_t :: %{width: integer, height: integer, size: integer, data: tuple()}
@type point_t :: {integer, integer}
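  @doc """
  Builds a grid from a textual representation, one row of digits per line.

      iex> grid = Grid.new("123\\n456")
      iex> Grid.get({1, 1}, grid)
      5
  """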
@spec new(String.t()) :: grid_t
def new(contents) do
lines = contents |> String.split(~r/\s+/, trim: true)
width = String.length(Enum.at(lines, 0))
height = length(lines)
data =
lines
|> Enum.join()
|> String.graphemes()
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
%{width: width, height: height, size: width * height, data: data}
end
def new(width, height),
do: %{
width: width,
height: height,
size: width * height,
data: List.duplicate(0, width * height) |> List.to_tuple()
}
def string_to_point(s),
do: s |> String.split(",") |> Enum.map(&String.to_integer(String.trim(&1))) |> List.to_tuple()
@spec point_to_position(point_t, grid_t) :: integer
def point_to_position({x, y}, grid), do: grid[:width] * y + x
@spec position_to_point(integer, grid_t) :: point_t
def position_to_point(n, grid), do: {rem(n, grid[:width]), div(n, grid[:width])}
@spec get(point_t, grid_t) :: any
def get({x, y}, grid), do: elem(grid[:data], point_to_position({x, y}, grid))
@spec get(grid_t, integer) :: any
def get(grid, v), do: elem(grid[:data], v)
@spec put(point_t, grid_t, any) :: any
def put({x, y}, grid, value),
do: %{grid | data: put_elem(grid[:data], point_to_position({x, y}, grid), value)}
def increment(p, grid) do
put(p, grid, get(p, grid) + 1)
end
def is_valid_point?({x, y}, grid) do
x >= 0 && x < grid[:width] && y >= 0 && y < grid[:height]
end
def is_valid_position?(n, grid) do
n >= 0 && n < grid.size
end
def get_adjacent({x, y}, grid) do
[{x - 1, y}, {x + 1, y}, {x, y - 1}, {x, y + 1}]
|> Enum.filter(&is_valid_point?(&1, grid))
end
def get_adjacent_positions(n, grid) do
n |> position_to_point(grid) |> get_adjacent(grid) |> Enum.map(&point_to_position(&1, grid))
end
def get_surrounding({x, y}, grid) do
[
{x - 1, y},
{x + 1, y},
{x, y - 1},
{x, y + 1},
{x - 1, y - 1},
{x - 1, y + 1},
{x + 1, y - 1},
{x + 1, y + 1}
]
|> Enum.filter(&is_valid_point?(&1, grid))
end
def get_all_points(grid) do
Range.new(0, grid[:size] - 1)
|> Enum.map(&position_to_point(&1, grid))
end
@spec min_adjacent(point_t, grid_t) :: any
def min_adjacent(p, grid) do
get_adjacent(p, grid)
|> Enum.map(&get(&1, grid))
|> Enum.min()
end
def inspect(grid) do
grid[:data]
|> Tuple.to_list()
|> Enum.with_index()
|> Enum.each(fn {x, index} ->
if rem(index, grid[:width]) == 0, do: IO.write("\n")
if x == 0,
do: IO.write("."),
else: IO.write(x)
IO.write(" ")
end)
grid
end
def shrink_to(grid, nrows, ncols) do
data =
grid[:data]
|> Tuple.to_list()
|> Enum.with_index(fn x, index -> {x, index} end)
|> Enum.filter(fn {_, n} ->
{x, y} = Grid.position_to_point(n, grid)
x < ncols && y < nrows
end)
|> Enum.map(&elem(&1, 0))
|> List.to_tuple()
%{width: ncols, height: nrows, size: ncols * nrows, data: data}
end
end
# Source: aoc21/lib/grid.ex
require Syscrap.Helpers, as: H
defmodule Syscrap.MongoWorker do
use GenServer
@moduledoc """
This worker encapsulates a Mongo connection pooled using `:poolboy`.
No more, no less.
Well, maybe a little more. It includes some helpers to ease the work with that connection.
## Usage
### Using `get/1` & `release/1`
Use `get/1` to get a `Mongo.Collection` record and a reference to its
worker on the pool. Then perform any operation on that collection.
Remember to call `release/1` to return the worker to the pool. Once you
are done with it.
`iex` session:
```
iex(1)> alias Syscrap.MongoWorker, as: SM
iex(2)> alias Mongo.Collection, as: MC
iex(3)> :poolboy.status(Syscrap.MongoPool)
{:ready, 5, 0, 0}
iex(4)> {coll, worker} = SM.get db: "syscrap", coll: "test"
iex(6)> MC.find(coll) |> Enum.to_list
[%{_id: ObjectId(5468f42a4f9bdc79d2779e9a), a: 23},
%{_id: ObjectId(5468fb834f9bdc79d2779e9b), b: 23235}]
iex(7)> %{c: "blabla"} |> MC.insert_one(coll)
{:ok, %{c: "blabla"}}
iex(8)> MC.find(coll) |> Enum.to_list
[%{_id: ObjectId(5468f42a4f9bdc79d2779e9a), a: 23},
%{_id: ObjectId(5468fb834f9bdc79d2779e9b), b: 23235},
%{_id: ObjectId(5468fee64f9bdc79d2779e9c), c: "blabla"}]
iex(9)> :poolboy.status(Syscrap.MongoPool)
{:ready, 4, 0, 1}
iex(10)> SM.release worker
:ok
iex(11)> :poolboy.status(Syscrap.MongoPool)
{:ready, 5, 0, 0}
```
### Using `transaction`
If you prefer to use
[transaction](https://github.com/devinus/poolboy/blob/94a3f7a481f36e71d5750f76fcc3205461d3feff/src/poolboy.erl#L71)
then you just give it a `fn` that will receive a `worker`.
You can use that `worker` with `yield` to get a `Mongo.Collection`.
Then you can use that collection freely. When you are done with it,
`transaction` will properly release the worker into the pool for you.
All the way, you are protected inside a `try ... after` block, so the
worker is always returned to the pool.
```
:poolboy.transaction(Syscrap.MongoPool, fn(worker) ->
    coll = worker |> yield(db: "syscrap", coll: "test")
MC.find(coll) |> Enum.to_list
%{c: "blabla"} |> MC.insert_one(coll)
end)
```
### Using `run`
If you just need to run some queries and then get a return value, you can
use `run`.
```
result = SM.run("test", fn(coll) ->
MC.find(coll) |> Enum.to_list
%{c: "blabla"} |> MC.insert_one(coll)
MC.find(coll) |> Enum.to_list
end)
```
"""
def start_link(opts \\ []), do: GenServer.start_link(__MODULE__, :ok, opts)
@doc """
Creates the connection for this worker.
  `opts` will be used when needed. For now there's no need.
"""
def init(_opts), do: {:ok, Mongo.connect!}
@doc """
Yield the connection when asked
"""
def handle_call(:yield, _from, conn), do: {:reply, conn, conn}
@doc """
Request the connection to the worker, and get db/collection for given names.
"""
def yield(server, opts) do
opts = H.defaults opts, coll: "test", db: H.env(:mongo_db_opts)[:database]
GenServer.call(server, :yield)
|> Mongo.db(opts[:db])
|> Mongo.Db.collection(opts[:coll])
end
@doc """
Get db/collection for given names using a worker from given pool
(`Syscrap.MongoPool` by default).
Returns requested Mongo collection, and the underlying worker.
You should checkin that worker back to the pool using `release/1`.
"""
def get(opts) do
opts = H.defaults opts, db: H.env(:mongo_db_opts)[:database],
coll: "test",
pool: Syscrap.MongoPool
w = :poolboy.checkout(opts[:pool])
coll = w |> yield(opts)
{coll,w}
end
@doc """
Checkin given worker to given pool (`Syscrap.MongoPool` by default).
"""
def release(worker, pool \\ Syscrap.MongoPool), do: :poolboy.checkin(pool, worker)
@doc """
Clean wrapper for a `:poolboy.transaction/2` over the default pool.
Gets a collection name, gets a handle to that collection using a pool worker,
and passes it to the given function.
Returns whatever the given function returns.
```
SM.run("my_collection", fn(coll) ->
MC.find(coll) |> Enum.to_list
%{c: "blabla"} |> MC.insert_one(coll)
MC.find(coll) |> Enum.to_list
end)
```
Always releases the worker back to the pool.
"""
def run(coll, fun) when is_binary(coll), do: run([coll: coll], fun)
def run(opts, fun) do
opts = H.defaults opts, db: H.env(:mongo_db_opts)[:database],
coll: "test",
pool: Syscrap.MongoPool
:poolboy.transaction(opts[:pool], fn(worker) ->
coll = worker |> yield(opts)
fun.(coll)
end)
end
end
# Source: lib/syscrap/mongo_worker.ex
defmodule Commander do
@moduledoc """
Documentation for Commander.
"""
@doc """
main - Main function of Commander. Takes an array of arguments and options and builds a configuration model based on them
## Examples
iex> Commander.handle_main(["test","-test","-othertest","--newtest=test","-nt=newtest","--doubledash"])
%Config{
arguments: ["test"],
options: %{
"doubledash" => true,
"newtest" => "test",
"nt" => "newtest",
"othertest" => true,
"test" => true
}
}
"""
def handle_main(args) do
{_arguments, config} =
{args, %Config{}}
|> map_arguments
|> map_options
config
end
@doc """
  map_arguments - maps all arguments to the appropriate place in the configuration model
## Examples
iex> Commander.map_arguments({["test","-test","-othertest","--newtest=test","-nt=newtest","--doubledash"], %Config{}})
{["test","-test","-othertest","--newtest=test","-nt=newtest","--doubledash"], %Config{arguments: ["test"], options: %{}}}
"""
def map_arguments({args, config}) when is_list(args) do
args_list =
args
|> Enum.filter(fn x -> !String.starts_with?(x, "-") end)
config = %Config{config | arguments: args_list}
{args, config}
end
@doc """
  map_options - maps all options to the appropriate place in the configuration model
## Examples
iex> Commander.map_options({["test","-test","-othertest","--newtest=test","-nt=newtest","--doubledash"], %Config{}})
{["test","-test","-othertest","--newtest=test","-nt=newtest","--doubledash"], %Config{arguments: [], options: %{"doubledash" => true,"newtest" => "test","nt" => "newtest","othertest" => true,"test" => true}}}
"""
def map_options({args, config}) when is_list(args) do
options =
args
|> Enum.filter(fn x -> String.starts_with?(x, "-") end)
|> Enum.map(&Commander.parse_option/1)
|> Enum.map(&reconcile_equals/1)
|> Map.new()
config = %Config{config | options: options}
{args, config}
end
@doc """
parse_option - takes an option and returns the formatted option
## Examples
iex> Commander.parse_option("--newtest=test")
"newtest=test"
"""
def parse_option(arg) do
    case String.starts_with?(arg, "--") do
      true -> String.trim_leading(arg, "--")
      false -> lookup_full_option(arg, %{})
    end
end
@doc """
lookup_full_option - takes an abreviated option and returns the full option
## Examples
iex> Commander.lookup_full_option("-t=test", %{"t" => "trythisone"})
"trythisone=test"
iex> Commander.lookup_full_option("-x", %{"x" => "anotherone"})
"anotherone"
"""
def lookup_full_option(arg, full_options) do
trimmed = String.trim_leading(arg, "-")
{key, value} = reconcile_equals(trimmed)
option = Map.get(full_options, key, key)
case value !== "" && String.contains?(arg, "=") do
true -> "#{option}=#{value}"
false -> option
end
end
defp reconcile_equals(base) do
case String.contains?(base, "=") do
true -> handle_valid_equals(base)
false -> {base, true}
end
end
defp handle_valid_equals(base) do
[key | values] = String.split(base, "=")
value = Enum.join(values, "=")
{key, value}
end
end
# Source: lib/commander.ex
defmodule TelemetryMetricsAppsignal do
@moduledoc """
AppSignal Reporter for [`Telemetry.Metrics`](https://github.com/beam-telemetry/telemetry_metrics) definitions.
This reporter is useful for getting [custom metrics](https://docs.appsignal.com/metrics/custom.html)
into AppSignal from your application. These custom metrics are especially
useful for building custom dashboards.
To use the reporter, first define a list of metrics as shown here:
def metrics, do:
[
summary("phoenix.endpoint.stop.duration"),
last_value("vm.memory.total"),
counter("my_app.my_server.call.exception")
]
It's recommended to start TelemetryMetricsAppsignal under a supervision tree,
either in your main application or as recommended [here](https://hexdocs.pm/phoenix/telemetry.html#the-telemetry-supervisor)
if using Phoenix:
{TelemetryMetricsAppsignal, [metrics: metrics()]}
Putting that altogether, your configuration could look something like this:
def start_link(_arg) do
children = [
{TelemetryMetricsAppsignal, [metrics: metrics()]},
...
]
Supervisor.init(children, strategy: :one_for_one)
end
defp metrics, do:
[
summary("phoenix.endpoint.stop.duration"),
last_value("vm.memory.total"),
counter("my_app.my_server.call.exception")
]
Optionally you can register a name:
{TelemetryMetricsAppsignal,
[metrics: metrics(), name: MyTelemetryMetricsAppsignal]}
The following table shows how `Telemetry.Metrics` metrics map to [AppSignal
metrics](https://docs.appsignal.com/metrics/custom.html#metric-types):
| Telemetry.Metrics | AppSignal |
|-----------------------|-----------|
| `last_value` | `gauge` |
| `counter` | `counter` |
| `sum` | `counter`, increased by the provided value |
| `summary` | `measurement` |
| `distribution` | Not supported |
"""
use GenServer
require Logger
alias Telemetry.Metrics.Counter
alias Telemetry.Metrics.Distribution
alias Telemetry.Metrics.LastValue
alias Telemetry.Metrics.Sum
alias Telemetry.Metrics.Summary
@appsignal Application.compile_env(:telemetry_metrics_appsignal, :appsignal, Appsignal)
@type metric ::
Counter.t()
| Distribution.t()
| LastValue.t()
| Sum.t()
| Summary.t()
@type option :: {:metrics, [metric]} | {:name, GenServer.name()}
@spec start_link([option]) :: GenServer.on_start()
def start_link(opts) do
server_opts = Keyword.take(opts, [:name])
metrics = Keyword.get(opts, :metrics, [])
GenServer.start_link(__MODULE__, metrics, server_opts)
end
@impl true
@spec init([metric]) :: {:ok, [[atom]]}
def init(metrics) do
Process.flag(:trap_exit, true)
groups = Enum.group_by(metrics, & &1.event_name)
for {event, metrics} <- groups do
id = {__MODULE__, event, self()}
:telemetry.attach(id, event, &handle_event/4, metrics: metrics)
end
{:ok, Map.keys(groups)}
end
@impl true
def terminate(_, events) do
for event <- events do
:telemetry.detach({__MODULE__, event, self()})
end
:ok
end
defp handle_event(_event_name, measurements, metadata, config) do
metrics = Keyword.get(config, :metrics, [])
Enum.each(metrics, fn metric ->
if value = prepare_metric_value(metric, measurements) do
tags = prepare_metric_tags(metric, metadata)
send_metric(metric, value, tags)
end
end)
end
defp prepare_metric_value(metric, measurements)
defp prepare_metric_value(%Counter{}, _measurements), do: 1
defp prepare_metric_value(%{measurement: convert}, measurements) when is_function(convert) do
convert.(measurements)
end
defp prepare_metric_value(%{measurement: measurement}, measurements)
when is_map_key(measurements, measurement) do
measurements[measurement]
end
defp prepare_metric_value(_, _), do: nil
defp prepare_metric_tags(metric, metadata) do
tag_values = metric.tag_values.(metadata)
Map.take(tag_values, metric.tags)
end
defp send_metric(%Counter{} = metric, _value, tags) do
call_appsignal(:increment_counter, metric.name, 1, tags)
end
defp send_metric(%Summary{} = metric, value, tags) do
call_appsignal(
:add_distribution_value,
metric.name,
value,
tags
)
end
defp send_metric(%LastValue{} = metric, value, tags) do
call_appsignal(
:set_gauge,
metric.name,
value,
tags
)
end
defp send_metric(%Sum{} = metric, value, tags) do
call_appsignal(
:increment_counter,
metric.name,
value,
tags
)
end
defp send_metric(metric, _measurements, _tags) do
Logger.warn("Ignoring unsupported metric #{inspect(metric)}")
end
defp call_appsignal(function_name, key, value, tags) when is_list(key) do
call_appsignal(function_name, Enum.join(key, "."), value, tags)
end
defp call_appsignal(function_name, key, value, tags)
when is_binary(key) and is_number(value) and is_map(tags) do
tags
|> tag_permutations()
|> Enum.each(fn tags_permutation ->
apply(@appsignal, function_name, [key, value, tags_permutation])
end)
end
defp call_appsignal(function_name, key, value, tags) do
Logger.warn("""
    Attempted to send invalid metric data to the AppSignal library: \
#{inspect(function_name)}(\
#{inspect(key)}, \
#{inspect(value)}, \
#{inspect(tags)}\
)
""")
end
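  # Expands a tag map into every combination of concrete values and the
  # wildcard "any" so AppSignal can aggregate across tag dimensions, e.g.
  # %{route: "/users"} yields [%{route: "/users"}, %{route: "any"}].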
defp tag_permutations(map) when map == %{}, do: [%{}]
defp tag_permutations(tags) do
for {tag_name, tag_value} <- tags,
value_permutation <- [tag_value, "any"],
rest <- tag_permutations(Map.drop(tags, [tag_name])) do
Map.put(rest, tag_name, value_permutation)
end
|> Enum.uniq()
end
end
# Source: lib/telemetry_metrics_appsignal.ex
defmodule OMG.ChildChain.Fees.FeeUpdater do
@moduledoc """
Decides whether fees will be updated from the fetched fees from the feed.
"""
alias OMG.ChildChain.Fees.FeeMerger
alias OMG.Fees
@type feed_reading_t :: {pos_integer(), Fees.full_fee_t()}
@type can_update_result_t :: {:ok, feed_reading_t()} | :no_changes
# Internal data structure resulted from merge `stored_fees` and `fetched_fees` by tx type.
# See `merge_specs_by_tx_type/2`
@typep maybe_unpaired_fee_specs_merge_t :: %{non_neg_integer() => Fees.fee_t() | {Fees.fee_t(), Fees.fee_t()}}
# As above but fully paired, which means `stored_fees` and `fetched_fees` support the same tx types
@typep paired_fee_specs_merge_t :: %{non_neg_integer() => {Fees.fee_t(), Fees.fee_t()}}
@doc """
Newly fetched fees will be effective as long as the amount change on any token is significant
or the time passed from previous update exceeds the update interval.
"""
@spec can_update(
stored_fees :: feed_reading_t(),
fetched_fees :: feed_reading_t(),
tolerance_percent :: pos_integer(),
update_interval_seconds :: pos_integer()
) :: can_update_result_t()
def can_update({_, fee_spec}, {_, fee_spec}, _tolerance_percent, _update_interval_seconds), do: :no_changes
def can_update({t0, _}, {t1, _} = updated, _tolerance_percent, update_interval_seconds)
when t0 <= t1 and t1 - t0 >= update_interval_seconds,
do: {:ok, updated}
def can_update({_, stored_fees}, {_, fetched_fees} = updated, tolerance_percent, _update_interval_seconds) do
merged = merge_specs_by_tx_type(stored_fees, fetched_fees)
with false <- stored_and_fetched_differs_on_tx_type?(merged),
false <- stored_and_fetched_differs_on_token?(merged),
amount_diffs = Map.values(FeeMerger.merge_specs(stored_fees, fetched_fees)),
false <- is_change_significant?(amount_diffs, tolerance_percent) do
:no_changes
else
_ -> {:ok, updated}
end
end
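  # E.g. merging %{1 => stored} with %{1 => fetched, 2 => extra} yields
  # %{1 => {stored, fetched}, 2 => extra} - tx type 2 stays unpaired.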
@spec merge_specs_by_tx_type(Fees.full_fee_t(), Fees.full_fee_t()) :: maybe_unpaired_fee_specs_merge_t()
defp merge_specs_by_tx_type(stored_specs, fetched_specs) do
Map.merge(stored_specs, fetched_specs, fn _t, stored_fees, fetched_fees -> {stored_fees, fetched_fees} end)
end
# Tells whether each tx_type in stored fees has a corresponding fees in fetched
# Returns `true` when there is a mismatch
@spec stored_and_fetched_differs_on_tx_type?(maybe_unpaired_fee_specs_merge_t()) :: boolean()
defp stored_and_fetched_differs_on_tx_type?(merged_specs) do
merged_specs
|> Map.values()
|> Enum.all?(&Kernel.is_tuple/1)
|> Kernel.not()
end
# Checks whether previously stored and fetched fees differs on token
# Returns `true` when there is a mismatch
@spec stored_and_fetched_differs_on_token?(paired_fee_specs_merge_t()) :: boolean()
defp stored_and_fetched_differs_on_token?(merged_specs) do
Enum.any?(merged_specs, &merge_pair_differs_on_token?/1)
end
@spec merge_pair_differs_on_token?({non_neg_integer(), {Fees.fee_t(), Fees.fee_t()}}) :: boolean()
defp merge_pair_differs_on_token?({_type, {stored_fees, fetched_fees}}) do
not MapSet.equal?(
stored_fees |> Map.keys() |> MapSet.new(),
fetched_fees |> Map.keys() |> MapSet.new()
)
end
# Change is significant when
# - token amount difference exceeds the tolerance level,
# - there is missing token in any of specs, so token support was either added or removed
# in the update.
@spec is_change_significant?(list(Fees.merged_fee_t()), non_neg_integer()) :: boolean()
defp is_change_significant?(token_amounts, tolerance_percent) do
tolerance_rate = tolerance_percent / 100
token_amounts
|> Enum.flat_map(&Map.values/1)
|> Enum.any?(&amount_diff_exceeds_tolerance?(&1, tolerance_rate))
end
defp amount_diff_exceeds_tolerance?([_no_change], _rate), do: false
defp amount_diff_exceeds_tolerance?([stored, fetched], rate) do
abs(stored - fetched) / stored >= rate
end
end
# Source: apps/omg_child_chain/lib/omg_child_chain/fees/fee_updater.ex
defmodule Snowflake.Database do
@moduledoc """
This module was written to simulate a k/v database layer
where a value per node could be seeded and retrieved
The node ids are stored in a custom dets file
The mostly arcane :dets was used as it was natively supported
The node ids can be created by running the custom mix task `mix seed`
which invokes setup_all/0
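
  Illustrative read after `mix seed` has run for this node:

      Snowflake.Database.get(:node_id)
      #=> "0"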
"""
use GenServer
require Logger
@tbl :node_id_persist
@valid_ids 0..1023
@dets_file_name "unique1024.dets"
@key "node_id"
@test_mgr_node_port "1023"
def start_link() do
GenServer.start_link(__MODULE__, :ok, name: Snowflake.Database)
end
def child_spec(_arg) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, []},
type: :worker,
restart: :permanent,
name: Snowflake.Database
}
end
def get(:node_id) do
GenServer.call(__MODULE__, {:get, :node_id})
end
@impl GenServer
def init(_) do
{:ok, :ok, {:continue, :load}}
end
@impl GenServer
def handle_continue(:load, :ok) do
case load_local(@tbl, node()) do
{:ok, @tbl} ->
case read(@tbl) do
[{@key, node_id}] ->
{:noreply, node_id}
[] ->
raise "Unable to retrieve dets file contents, was it seeded correctly using mix seed?"
end
:test_node ->
{:noreply, @test_mgr_node_port}
# If we can't read from the dets file correctly raise
{:error, reason} ->
raise "Unable to open dets file #{inspect(reason)}"
end
end
@impl GenServer
def handle_call({:get, :node_id}, _, node_id) do
{:reply, node_id, node_id}
end
@doc """
Terminate callback
Close the dets file
"""
@impl GenServer
def terminate(_reason, _state) do
close(@tbl)
_ = Logger.debug("Terminating Snowflake Database Server")
:ok
end
  # We simulate multiple nodes on a single host with per-node directories under the
# priv directory
def setup_all() do
node_list = Application.fetch_env!(:snowflake, :nodes)
Logger.info("node list is #{inspect(node_list)}")
size = Enum.count(node_list)
# Generate the list of ids taking only the amount of nodes currently available
ids_list = @valid_ids |> Enum.take(size)
zipped = Enum.zip(node_list, ids_list)
# For each pair, setup the path and load the tables properly
# seeding with the proper value
Enum.each(zipped, fn {node_name, node_id} ->
node_name = Atom.to_string(node_name)
{:ok, @tbl} = load_single(@tbl, node_name)
seed(@tbl, node_id)
close(@tbl)
end)
end
def load_local(_table_name, :nonode@nohost), do: :test_node
def load_local(table_name, node), do: load_single(table_name, "#{node}")
defp load_single(table_name, node_name, seed \\ false)
when is_atom(table_name) and is_binary(node_name) do
# If these files don't exist write them
dir = Application.fetch_env!(:snowflake, :db_folder)
[name_prefix, _] = node_name |> String.split("@")
db_folder = "/#{dir}/#{name_prefix}/"
base_path = :code.priv_dir(:snowflake)
db_path = base_path ++ String.to_charlist(db_folder)
File.mkdir_p!(db_path)
path = Path.absname(@dets_file_name, db_path)
if seed do
File.rm(path)
end
# Erlang prefers charlist
path_charlist = String.to_charlist(path)
:dets.open_file(table_name, file: path_charlist, type: :set)
end
defp seed(table_name, value) when is_atom(table_name) and is_integer(value) do
:dets.insert_new(table_name, {@key, "#{value}"})
case read(table_name) do
[{@key, v}] when is_binary(v) ->
v
err ->
raise "Unsupported read table value #{inspect(err)}"
end
end
defp read(table_name) when is_atom(table_name) do
# Below can only be used in the REPL, have to explicitly use parse transform-ish format
# select_all = :ets.fun2ms(&(&1))
select_all = [{:"$1", [], [:"$1"]}]
:dets.select(table_name, select_all)
end
# Apparently dets likes to be explicitly closed or so I've heard :)
defp close(table_name) do
:dets.close(table_name)
end
end
# Source: lib/snowflake/database.ex
defmodule Resx.Transformer do
@moduledoc """
A transformer is a referenceable interface for performing reproducible
modifications on resources.
A module that implements the transformer behaviour becomes usable by the
`Resx.Producers.Transform` producer.
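
  A minimal, illustrative transformer (it assumes the resource content exposes
  a `data` field; adjust to the real content shape):

      defmodule UpcaseTransformer do
        use Resx.Transformer

        @impl Resx.Transformer
        def transform(resource = %{ content: content }, _opts) do
          { :ok, %{ resource | content: %{ content | data: String.upcase(content.data) } } }
        end
      end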
"""
import Kernel, except: [apply: 3]
alias Resx.Resource
alias Resx.Resource.Reference
alias Resx.Resource.Reference.Integrity
@doc """
Implement the behaviour to transform a resource.
The `options` keyword allows for your implementation to expose some configurable
settings.
If the transformation was successful return `{ :ok, resource }`, where `resource`
is the newly transformed resource. Otherwise return an appropriate error.
"""
@callback transform(resource :: Resource.t, options :: keyword) :: { :ok, resource :: Resource.t } | Resx.error
@doc false
defmacro __using__(_opts) do
quote do
@behaviour Resx.Transformer
end
end
defmodule TransformError do
defexception [:message, :type, :reason, :transformer, :resource, :options]
@impl Exception
def exception({ resource, transformer, options, { type, reason } }) do
%TransformError{
message: "failed to transform resource due to #{type} error: #{inspect reason}",
type: type,
reason: reason,
resource: resource,
transformer: transformer,
options: options
}
end
end
@doc """
Apply a transformation to a resource.
A `transformer` must be a module that implements the `Resx.Transformer`
behaviour.
"""
@spec apply(Resource.t, module, keyword) :: { :ok, Resource.t } | Resx.error
def apply(resource, transformer, opts \\ []) do
case transformer.transform(resource, opts) do
{ :ok, resource = %{ reference: reference } } ->
{ :ok, %{ resource | reference: %Reference{ adapter: Resx.Producers.Transform, repository: { transformer, opts, reference }, integrity: %Integrity{ timestamp: DateTime.utc_now } } } }
{ :error, error } -> { :error, error }
end
end
@doc """
Apply a transformation to a resource.
Raises a `Resx.Transformer.TransformError` if the transformation cannot be applied.
  For more details see `apply/3`.
"""
@spec apply!(Resource.t, module, keyword) :: Resource.t | no_return
def apply!(resource, transformer, opts \\ []) do
case apply(resource, transformer, opts) do
{ :ok, resource } -> resource
{ :error, error } -> raise TransformError, { resource, transformer, opts, error }
end
end
end
# Source: lib/resx/transformer.ex
defmodule BitcoinSimulator.BitcoinCore do
alias BitcoinSimulator.BitcoinCore.{Blockchain, Mining, Network, RawTransaction, Wallet}
# Block Chain
def get_new_blockchain, do: Blockchain.get_new_blockchain()
def get_best_block_hash(blockchain), do: Blockchain.get_best_block_hash(blockchain)
def block_header_hash(header), do: Blockchain.block_header_hash(header)
def transaction_hash(tx), do: Blockchain.transaction_hash(tx)
def verify_block(blockchain, block), do: Blockchain.verify_block(blockchain, block)
def verify_transaction(blockchain, tx), do: Blockchain.verify_transaction(blockchain, tx)
def add_block(block, blockchain, wallet, mempool, mining_process \\ nil, mining_txs \\ nil) do
Blockchain.add_block(block, blockchain, wallet, mempool, mining_process, mining_txs)
end
# Mining
def get_new_mempool, do: Mining.get_new_mempool()
def get_top_unconfirmed_transactions(mempool), do: Mining.get_top_unconfirmed_transactions(mempool)
def get_block_template(prev_hash, txs), do: Mining.get_block_template(prev_hash, txs)
def mine(block, coinbase_addr, self_id), do: Mining.mine(block, coinbase_addr, self_id)
def add_unconfirmed_tx(mempool, tx, tx_hash), do: Mining.add_unconfirmed_tx(mempool, tx, tx_hash)
def calc_cainbase_value(blockchain, txs), do: Mining.calc_cainbase_value(blockchain, txs)
# Network
def get_new_message_record, do: %Network.MessageRecord{}
def get_initial_neighbors(id), do: Network.get_initial_neighbors(id)
def get_initial_blockchain(neighbors), do: Network.get_initial_blockchain(neighbors)
def exchange_neighbors(neighbors), do: Network.exchange_neighbors(neighbors)
def mix_neighbors(neighbors, self_id), do: Network.mix_neighbors(neighbors, self_id)
def message_seen?(record, type, hash), do: Network.message_seen?(record, type, hash)
def saw_message(record, type, hash), do: Network.saw_message(record, type, hash)
def clean_message_record(record), do: Network.clean_message_record(record)
def broadcast_message(type, message, neighbors, sender), do: Network.broadcast_message(type, message, neighbors, sender)
# Raw Transaction
def create_raw_transaction(in_addresses, out_addresses, out_values, change_address, change_value) do
RawTransaction.create_raw_transaction(in_addresses, out_addresses, out_values, change_address, change_value)
end
def create_coinbase_transaction(out_addresses, out_values), do: RawTransaction.create_coinbase_transaction(out_addresses, out_values)
# Wallet
def get_new_wallet, do: Wallet.get_new_wallet()
def get_new_address(wallet), do: Wallet.get_new_address(wallet)
def combine_unspent_addresses(wallet, target_value), do: Wallet.combine_unspent_addresses(wallet, target_value)
def spend_address(wallet, address), do: Wallet.spend_address(wallet, address)
def import_address(wallet, address), do: Wallet.import_address(wallet, address)
end | lib/bitcoin_simulator/bitcoin_core.ex | 0.682785 | 0.425068 | bitcoin_core.ex | starcoder |
defmodule Zappa.Tag do
@moduledoc """
This struct holds information relevant to parsing a handlebars tag. All helper functions registered in the
`%Zappa.Helpers{}` struct are passed a `%Zappa.Tag{}` struct as their single argument.
## %Zappa.Tag{} Keys
- `:name` - the identifying name of the tag.
- `:raw_options` - everything but the name.
- `:raw_contents` - the full raw contents (name + raw_options). E.g. `song "Joe's Garage" volume="high"` from the tag `{{song "Joe's Garage" volume="high"}}`
- `:args` - a list of parsed arguments. Each argument in the list is represented as a map with keys for `:value` and
`:quoted?` so the implementations can react differently if a value was passed directly as a variable (unquoted)
or as a literal quoted string.
- `:kwargs` - a map of [hash arguments](https://handlebarsjs.com/guide/block-helpers.html#hash-arguments).
  - `:block_contents` - the full contents of a block (only applicable for block tags). The contents will be parsed or unparsed depending on how the parser encountered the block, i.e. `{{#block}}` tags will yield parsed `block_contents` whereas `{{{{#block}}}}` tags will yield unparsed `block_contents`.
- `:opening_delimiter` - the string that marked the beginning of the tag.
- `:closing_delimiter` - the string that marked the end of the tag.
The terminology here borrows from Python: [kwargs](https://pythontips.com/2013/08/04/args-and-kwargs-in-python-explained/)
refers to "keyword arguments".
## Examples
  Tag: `{{song "Joe's Garage" volume="high"}}`
- `:name`: `song`
- `:raw_contents`: `song "Joe's Garage" volume="high"`
- `:raw_options`: `"Joe's Garage" volume="high"`
- `:block_contents`: nil
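
  A registered helper receives this struct as its only argument; a sketch
  (the helper body is illustrative, not part of Zappa's API):

      fn %Zappa.Tag{name: "song", args: args} ->
        titles = Enum.map(args, & &1.value)
        "Songs: " <> Enum.join(titles, ", ")
      end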
"""
defstruct name: "",
raw_options: "",
raw_contents: "",
args: [],
kwargs: %{},
block_contents: nil,
opening_delimiter: "",
closing_delimiter: ""
@type t :: %__MODULE__{
name: String.t(),
raw_options: String.t(),
raw_contents: String.t(),
args: list,
kwargs: map,
block_contents: String.t() | nil,
opening_delimiter: String.t(),
closing_delimiter: String.t()
}
end | lib/zappa/tag.ex | 0.927929 | 0.675276 | tag.ex | starcoder |
defmodule Loom.TypedORMap do
@moduledoc """
  Contains a macro that creates typed maps of CRDTs.
"""
@doc """
  Creates a module that wraps a map of the given CRDT type and implements the CRDT protocol for that
type.
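  For example (assuming a CRDT module such as `Loom.PNCounter` exists in the
  application):

      import Loom.TypedORMap
      defmap Loom.PNCounter
      # defines Loom.PNCounterMap, a map of PNCounter values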
"""
defmacro defmap(type) do
type = Macro.expand(type, __CALLER__)
name = :"#{type}Map"
quote location: :keep do
defmodule unquote(name) do
@moduledoc """
      This module is automatically created via #{unquote(__MODULE__)}.defmap.
      It provides a map interface specialized to this CRDT type.
"""
alias Loom.AWORMap, as: M
alias unquote(type), as: Type
@compile :debug_info
@type actor :: term
@type key :: term
@type crdt :: %Type{}
@type value :: term
@opaque t :: %__MODULE__{
map: M.t()
}
defstruct map: M.new()
@doc """
Returns a new #{unquote(name)}.
The identity value of an empty AWORMap is `nil` because of the difficulties
of matching against `%{}`, which is not the equivalent of `[]`.
iex> #{unquote(name)}.new |> #{unquote(name)}.value
nil
"""
@spec new :: t
def new, do: %__MODULE__{}
@doc """
Returns the currently-running delta of an #{unquote(name)}
"""
@spec delta(t) :: t
def delta(%__MODULE__{map: map}) do
M.delta(map)
|> to_type
end
@doc """
You can use this to clear the delta from an #{unquote(name)}. Clearing the delta can
help shrink the memory usage of this CRDT.
"""
@spec clear_delta(t) :: t
def clear_delta(%__MODULE__{map: map}) do
M.clear_delta(map)
|> to_type
end
@doc """
Insert a value, and merge it with any that exist already
"""
@spec put(t, actor, key, crdt) :: t
def put(%__MODULE__{map: map}, actor, key, %Type{} = value) do
M.put(map, actor, key, value)
|> to_type
end
@doc """
Delete an entry for a key-module pair
"""
@spec delete(t, key) :: t
def delete(%__MODULE__{map: map}, key) do
M.delete(map, key, Type)
|> to_type
end
@doc """
Join a map
"""
@spec join(t, t) :: t
def join(%__MODULE__{map: map1}, %__MODULE__{map: map2}) do
M.join(map1, map2)
|> to_type
end
@doc """
Empties out an existing map.
"""
@spec empty(t) :: t
def empty(%__MODULE__{map: map}) do
M.empty(map)
|> to_type
end
@doc """
Get a value for a key-module pair
"""
@spec get(t, key) :: crdt
def get(%__MODULE__{map: map}, key), do: M.get(map, key, Type)
@doc """
Get a value's value for a key-module pair
"""
@spec get_value(t, key) :: value
def get_value(%__MODULE__{map: map}, key), do: M.get_value(map, key, Type)
@doc """
Returns the set of all key-module pairs
"""
@spec keys(t) :: [key]
def keys(%__MODULE__{map: map}), do: M.keys(map)
@doc """
Tests to see if the CRDT is empty.
      This is used when composing CRDTs, because CRDTs with dots might actually be
      full of empty CRDTs: we have to remain robust against undead updates
      that want to feast on our collective brains. Time is a flat circle.
"""
@spec empty?(t) :: boolean
def empty?(%__MODULE__{map: map}), do: M.empty?(map)
@doc """
Checks if a key-module pair exists in the map already for the key.
"""
@spec has_key?(t, key) :: boolean
def has_key?(%__MODULE__{map: map}, key), do: M.has_key?(map, key, Type)
@doc """
Returns a map of values for key-module pairs
"""
@spec value(t) :: value
def value(%__MODULE__{map: map}), do: M.value(map)
@spec to_type(M.t()) :: t
defp to_type(map), do: %__MODULE__{map: map}
end
defimpl Loom.CRDT, for: unquote(name) do
alias unquote(name), as: NMap
alias unquote(type), as: Type
@doc """
Returns a description of the operations that this CRDT takes.
Updates return a new CRDT, reads can return any natural datatype. This register
returns a value.
"""
def ops(_crdt) do
[
update: [
delete: [:key, :value_type],
put: [:actor, :key, :value]
],
read: [
get: [:key],
get_value: [:key],
keys: [],
has_key: [],
value: []
]
]
end
@doc """
Applies a CRDT to a counter in an abstract way.
This is for ops-based support.
"""
def apply(crdt, {:put, actor, key, value}) do
NMap.put(crdt, actor, key, value)
end
def apply(crdt, {:delete, key}) do
NMap.delete(crdt, key)
end
def apply(crdt, {:get, key}), do: NMap.get(crdt, key)
def apply(crdt, {:get_value, key}), do: NMap.get_value(crdt, key)
def apply(crdt, {:has_key, key}), do: NMap.has_key?(crdt, key)
def apply(crdt, :keys), do: NMap.keys(crdt)
def apply(crdt, :value), do: NMap.value(crdt)
@doc """
      Joins two CRDTs of the same type.
      Two different types cannot mix (yet).
"""
def join(a, b), do: NMap.join(a, b)
@doc """
Returns the most natural primitive value for a set, a list.
"""
def value(crdt), do: NMap.value(crdt)
end
end
end
end | lib/loom/typed_map.ex | 0.852522 | 0.612773 | typed_map.ex | starcoder |
defmodule Data.Item.Compiled do
@moduledoc """
An item is compiled after the item aspects are rolled together and merged
with the base item's stats.
"""
alias Data.Item
@fields [
:id,
:level,
:name,
:description,
:type,
:keywords,
:stats,
:effects,
:cost,
:user_text,
:usee_text,
:is_usable,
:amount,
:whitelist_effects
]
defstruct @fields
@type t :: %__MODULE__{}
@doc """
The item's item aspects should be preloaded
Repo.preload(item, [item_aspectings: [:item_aspect]])
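  A sketch of typical usage (the repo name is hypothetical):

      item
      |> Repo.preload(item_aspectings: [:item_aspect])
      |> Data.Item.Compiled.compile()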
"""
def compile(item) do
__MODULE__
|> struct(Map.take(item, @fields))
|> merge_stats(item)
|> merge_effects(item)
end
@doc """
Merge stats together
"""
@spec merge_stats(t(), Item.t()) :: t()
def merge_stats(compiled_item, %{item_aspectings: item_aspectings}) do
stats =
Enum.reduce(
item_aspectings,
compiled_item.stats,
&_merge_stats(&1, &2, compiled_item.level)
)
%{compiled_item | stats: stats}
end
defp _merge_stats(%{item_aspect: %{type: "armor", stats: stats}}, acc_stats, level) do
armor = scale_for_level(level, stats.armor)
%{acc_stats | armor: acc_stats.armor + armor}
end
defp _merge_stats(_, stats, _), do: stats
@doc """
Concatenate effects of the item and all of its tags
"""
@spec merge_effects(t(), Item.t()) :: t()
def merge_effects(compiled_item, %{item_aspectings: item_aspectings}) do
effects = Enum.flat_map(item_aspectings, &_scale_effects(&1, compiled_item.level))
%{compiled_item | effects: compiled_item.effects ++ effects}
end
defp _scale_effects(%{item_aspect: %{effects: effects}}, level) do
Enum.map(effects, &_scale_effect(&1, level))
end
  defp _scale_effect(effect = %{kind: "damage"}, level) do
    %{effect | amount: scale_for_level(level, effect.amount)}
  end
  defp _scale_effect(effect, _level), do: effect
@doc """
Scales a value for a level. Every 10 levels the value will double
iex> Data.Item.Compiled.scale_for_level(1, 5)
5
iex> Data.Item.Compiled.scale_for_level(11, 5)
10
iex> Data.Item.Compiled.scale_for_level(11, 10)
20
"""
def scale_for_level(level, value) do
round(Float.ceil(value * (1 + (level - 1) / 10)))
end
end | lib/data/item/compiled.ex | 0.793506 | 0.520131 | compiled.ex | starcoder |
import TypeClass
defclass Witchcraft.Semigroup do
@moduledoc ~S"""
A semigroup is a structure describing data that can be appendenated with others of its type.
That is to say that appending another list returns a list, appending one map
to another returns a map, and appending two integers returns an integer, and so on.
These can be chained together an arbitrary number of times. For example:
1 <> 2 <> 3 <> 5 <> 7 == 18
[1, 2, 3] <> [4, 5, 6] <> [7, 8, 9] == [1, 2, 3, 4, 5, 6, 7, 8, 9]
"foo" <> " " <> "bar" == "foo bar"
This generalizes the idea of a monoid, as it does not require an `empty` version.
## Type Class
An instance of `Witchcraft.Semigroup` must define `Witchcraft.Semigroup.append/2`.
Semigroup [append/2]
"""
alias __MODULE__
use Witchcraft.Internal, overrides: [<>: 2]
@type t :: any()
where do
@doc ~S"""
`append`enate two data of the same type. These can be chained together an arbitrary number of times. For example:
iex> 1 |> append(2) |> append(3)
6
iex> [1, 2, 3]
...> |> append([4, 5, 6])
...> |> append([7, 8, 9])
[1, 2, 3, 4, 5, 6, 7, 8, 9]
iex> "foo" |> append(" ") |> append("bar")
"foo bar"
## Operator
iex> use Witchcraft.Semigroup
...> 1 <> 2 <> 3 <> 5 <> 7
18
iex> use Witchcraft.Semigroup
...> [1, 2, 3] <> [4, 5, 6] <> [7, 8, 9]
[1, 2, 3, 4, 5, 6, 7, 8, 9]
iex> use Witchcraft.Semigroup
...> "foo" <> " " <> "bar"
"foo bar"
There is an operator alias `a <> b`. Since this conflicts with `Kernel.<>/2`,
  `use Witchcraft.Semigroup` will automatically exclude the Kernel operator.
This is highly recommended, since `<>` behaves the same on bitstrings, but is
now available on more datatypes.
"""
def append(a, b)
end
defalias a <> b, as: :append
@doc ~S"""
Flatten a list of homogeneous semigroups to a single container.
## Example
iex> concat [
...> [1, 2, 3],
...> [4, 5, 6]
...> ]
[1, 2, 3, 4, 5, 6]
"""
  @spec concat([Semigroup.t()]) :: Semigroup.t()
def concat(semigroup_of_lists) do
Enum.reduce(semigroup_of_lists, [], &Semigroup.append(&2, &1))
end
@doc ~S"""
Repeat the contents of a semigroup a certain number of times.
## Examples
iex> [1, 2, 3] |> repeat(times: 3)
[1, 2, 3, 1, 2, 3, 1, 2, 3]
"""
@spec repeat(Semigroup.t(), times: non_neg_integer()) :: Semigroup.t()
# credo:disable-for-lines:6 Credo.Check.Refactor.PipeChainStart
def repeat(to_repeat, times: times) do
fn -> to_repeat end
|> Stream.repeatedly()
|> Stream.take(times)
|> Enum.reduce(&Semigroup.append(&2, &1))
end
properties do
def associative(data) do
a = generate(data)
b = generate(data)
c = generate(data)
left = a |> Semigroup.append(b) |> Semigroup.append(c)
right = Semigroup.append(a, Semigroup.append(b, c))
equal?(left, right)
end
end
end
definst Witchcraft.Semigroup, for: Function do
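  # Appending functions composes them left-to-right:
  # append(f, g).(x) is equivalent to x |> f.() |> g.()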
def append(f, g) when is_function(g), do: Quark.compose(g, f)
end
definst Witchcraft.Semigroup, for: Witchcraft.Unit do
def append(_, _), do: %Witchcraft.Unit{}
end
definst Witchcraft.Semigroup, for: Integer do
def append(a, b), do: a + b
end
definst Witchcraft.Semigroup, for: Float do
def append(a, b), do: a + b
end
definst Witchcraft.Semigroup, for: BitString do
def append(a, b), do: Kernel.<>(a, b)
end
definst Witchcraft.Semigroup, for: List do
def append(a, b), do: a ++ b
end
definst Witchcraft.Semigroup, for: Map do
def append(a, b), do: Map.merge(a, b)
end
definst Witchcraft.Semigroup, for: MapSet do
def append(a, b), do: MapSet.union(a, b)
end
definst Witchcraft.Semigroup, for: Tuple do
# credo:disable-for-lines:5 Credo.Check.Refactor.PipeChainStart
custom_generator(_) do
Stream.repeatedly(fn -> TypeClass.Property.Generator.generate(%{}) end)
|> Enum.take(10)
|> List.to_tuple()
end
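  # Tuples are appended element-wise, e.g. {1, "a"} <> {2, "b"} == {3, "ab"}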
def append(tuple_a, tuple_b) do
tuple_a
|> Tuple.to_list()
|> Enum.zip(Tuple.to_list(tuple_b))
|> Enum.map(fn {x, y} -> Witchcraft.Semigroup.append(x, y) end)
|> List.to_tuple()
end
end | lib/witchcraft/semigroup.ex | 0.772574 | 0.536556 | semigroup.ex | starcoder |
defmodule Adventofcode.Day12RainRisk do
use Adventofcode
alias __MODULE__.{Captain, Position, State, Waypoint}
def part_1(input) do
input
|> parse
|> Captain.operate(State.part_1())
|> manhattan_distance
end
def part_2(input) do
input
|> parse
|> Captain.operate(State.part_2())
|> manhattan_distance
end
defmodule Captain do
def operate("E" <> val, state), do: State.move(state, {String.to_integer(val), 0})
def operate("W" <> val, state), do: State.move(state, {-String.to_integer(val), 0})
def operate("N" <> val, state), do: State.move(state, {0, String.to_integer(val)})
def operate("S" <> val, state), do: State.move(state, {0, -String.to_integer(val)})
def operate("R" <> val, state), do: State.rotate(state, String.to_integer(val))
def operate("L" <> val, state), do: State.rotate(state, 360 - String.to_integer(val))
def operate("F" <> val, state), do: State.forward(state, String.to_integer(val))
def operate(instructions, state), do: Enum.reduce(instructions, state, &operate/2)
end
defmodule State do
@enforce_keys [:waypoint, :logic]
defstruct ship: {0, 0}, waypoint: nil, logic: nil
def part_1(opts \\ []), do: struct(__MODULE__, [logic: :part_1, waypoint: {1, 0}] ++ opts)
def part_2(opts \\ []), do: struct(__MODULE__, [logic: :part_2, waypoint: {10, 1}] ++ opts)
def move(%State{logic: :part_1} = state, {east, north}) do
%{state | ship: Position.move(state.ship, {east, north})}
end
def move(%State{logic: :part_2} = state, {east, north}) do
%{state | waypoint: Position.move(state.waypoint, {east, north})}
end
def rotate(%State{} = state, degrees) do
%{state | waypoint: Waypoint.rotate(state.waypoint, degrees)}
end
def forward(%State{} = state, 0), do: state
def forward(%State{} = state, times) do
%{state | ship: Position.move(state.ship, state.waypoint)}
|> forward(times - 1)
end
end
defmodule Position do
def move({east, north}, {dx, dy}), do: {east + dx, north + dy}
end
defmodule Waypoint do
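    # Rotations are clockwise: 90° maps (east, north) to (north, -east),
    # 180° negates both components, and 270° maps to (-north, east).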
def rotate({east, north}, 90), do: {east, north} |> transpose |> flip
def rotate({east, north}, 180), do: {east, north} |> flip |> mirror
def rotate({east, north}, 270), do: {east, north} |> transpose |> mirror
defp transpose({east, north}), do: {north, east}
defp mirror({east, north}), do: {east * -1, north}
defp flip({east, north}), do: {east, north * -1}
end
def manhattan_distance(%State{ship: {east, north}}), do: abs(east) + abs(north)
def parse(input) do
input
|> String.trim()
|> String.split("\n")
end
end | lib/day_12_rain_risk.ex | 0.748995 | 0.605712 | day_12_rain_risk.ex | starcoder |
defmodule Ecto.Adapters.SQL.Sandbox do
@moduledoc """
Start a pool with a single sandboxed SQL connection.
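
  A sketch of enabling the sandbox pool in a repo's test configuration
  (application and repo names are hypothetical):

      config :my_app, MyApp.Repo,
        pool: Ecto.Adapters.SQL.Sandbox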
### Options
* `:shutdown` - The shutdown method for the connections (default: 5000) (see Supervisor.Spec)
"""
alias Ecto.Adapters.Connection
@behaviour Ecto.Pool
@typep log :: (%Ecto.LogEntry{} -> any())
@doc """
Starts a pool with a single sandboxed connections for the given SQL connection
module and options.
* `conn_mod` - The connection module, see `Ecto.Adapters.Connection`
* `opts` - The options for the pool and the connections
"""
@spec start_link(module, Keyword.t) :: {:ok, pid} | {:error, any}
def start_link(conn_mod, opts) do
{name, opts} = Keyword.pop(opts, :pool_name)
args = [__MODULE__, {conn_mod, opts}, [name: name]]
child_opts = [id: __MODULE__, modules: [__MODULE__]]
children = [Supervisor.Spec.worker(GenServer, args, child_opts)]
sup_name = Module.concat(name, Supervisor)
sup_opts = [strategy: :one_for_one, max_restarts: 0, name: sup_name]
Supervisor.start_link(children, sup_opts)
end
@doc false
@spec begin(pool, log, Keyword.t, timeout) ::
:ok | {:error, :sandbox} when pool: pid | atom
def begin(pool, log, opts, timeout) do
query(pool, :begin, log, opts, timeout)
end
@doc false
@spec restart(pool, log, Keyword.t, timeout) :: :ok when pool: pid | atom
def restart(pool, log, opts, timeout) do
query(pool, :restart, log, opts, timeout)
end
@doc false
@spec rollback(pool, log, Keyword.t, timeout) :: :ok when pool: pid | atom
def rollback(pool, log, opts, timeout) do
query(pool, :rollback, log, opts, timeout)
end
@doc false
@spec mode(pool, timeout) :: :raw | :sandbox when pool: pid | atom
def mode(pool, timeout \\ 5_000) do
GenServer.call(pool, :mode, timeout)
end
@doc false
def checkout(pool, timeout) do
checkout(pool, :run, timeout)
end
@doc false
def checkin(pool, ref, _) do
GenServer.cast(pool, {:checkin, ref})
end
@doc false
def open_transaction(pool, timeout) do
checkout(pool, :transaction, timeout)
end
@doc false
def close_transaction(pool, ref, _) do
GenServer.cast(pool, {:checkin, ref})
end
@doc false
def break(pool, ref, timeout) do
GenServer.call(pool, {:break, ref}, timeout)
end
## GenServer
@doc false
def init({module, opts}) do
_ = Process.flag(:trap_exit, true)
{shutdown, params} = Keyword.pop(opts, :shutdown, 5_000)
{:ok, %{module: module, conn: nil, queue: :queue.new(), fun: nil,
ref: nil, monitor: nil, mode: :raw, params: params,
shutdown: shutdown}}
end
## Lazy connect
def handle_call(req, from, %{conn: nil} = s) do
%{module: module, params: params} = s
case Connection.connect(module, params) do
{:ok, conn} ->
handle_call(req, from, %{s | conn: conn})
{:error, reason} ->
{:stop, reason, s}
end
end
## Checkout
@doc false
def handle_call({:checkout, ref, fun}, {pid, _}, %{ref: nil} = s) do
%{module: module, conn: conn} = s
mon = Process.monitor(pid)
{:reply, {:ok, {module, conn}}, %{s | fun: fun, ref: ref, monitor: mon}}
end
def handle_call({:checkout, ref, fun}, {pid, _} = from, %{queue: q} = s) do
mon = Process.monitor(pid)
{:noreply, %{s | queue: :queue.in({:checkout, from, ref, fun, mon}, q)}}
end
## Break
def handle_call({:break, ref}, from, %{mode: :raw, ref: ref} = s) do
s = demonitor(s)
GenServer.reply(from, :ok)
s = s
|> reset()
|> dequeue()
{:noreply, s}
end
def handle_call({:break, ref}, from, %{mode: :sandbox, ref: ref} = s) do
s = demonitor(s)
GenServer.reply(from, :ok)
{:noreply, dequeue(s)}
end
## Query
def handle_call({:query, query, log, opts}, _, %{ref: nil} = s) do
{reply, s} = handle_query(query, log, opts, s)
{:reply, reply, s}
end
def handle_call({:query, query, log, opts}, from, %{queue: q} = s) do
{:noreply, %{s | queue: :queue.in({query, log, opts, from}, q)}}
end
## Mode
def handle_call(:mode, _, %{mode: mode} = s) do
{:reply, mode, s}
end
## Cancel
@doc false
def handle_cast({:cancel, ref}, %{ref: ref} = s) do
handle_cast({:checkin, ref}, s)
end
def handle_cast({:cancel, ref}, %{queue: q} = s) do
{:noreply, %{s | queue: :queue.filter(&cancel(&1, ref), q)}}
end
## Checkin
def handle_cast({:checkin, ref}, %{ref: ref} = s) do
s = s
|> demonitor()
|> dequeue()
{:noreply, s}
end
## DOWN
@doc false
def handle_info({:DOWN, mon, _, _, _}, %{monitor: mon, fun: :run} = s) do
{:noreply, dequeue(%{s | fun: nil, monitor: nil, ref: nil})}
end
def handle_info({:DOWN, mon, _, _, _}, %{monitor: mon, mode: :raw} = s) do
s = %{s | fun: nil, monitor: nil, ref: nil}
|> reset()
|> dequeue()
{:noreply, s}
end
def handle_info({:DOWN, mon, _, _, _}, %{monitor: mon, mode: :sandbox} = s) do
s = %{s | fun: nil, monitor: nil, ref: nil}
|> dequeue()
{:noreply, s}
end
def handle_info({:DOWN, mon, _, _, _}, %{queue: q} = s) do
{:noreply, %{s | queue: :queue.filter(&down(&1, mon), q)}}
end
## EXIT
def handle_info({:EXIT, conn, reason}, %{conn: conn} = s) do
{:stop, reason, %{s | conn: nil}}
end
## Info
def handle_info(_, s) do
{:noreply, s}
end
## Terminate
@doc false
def terminate(_, %{conn: conn, shutdown: shutdown}) do
conn && Connection.shutdown(conn, shutdown)
end
## Helpers
defp checkout(pool, fun, timeout) do
ref = make_ref()
case :timer.tc(fn() -> do_checkout(pool, ref, fun, timeout) end) do
{queue_time, {:ok, mod_conn}} ->
{:ok, ref, mod_conn, queue_time}
{_, {:error, _} = error} ->
error
end
end
defp do_checkout(pool, ref, fun, timeout) do
try do
GenServer.call(pool, {:checkout, ref, fun}, timeout)
catch
:exit, {:timeout, _} = reason ->
GenServer.cast(pool, {:cancel, ref})
exit(reason)
:exit, {:noproc, _} ->
{:error, :noproc}
end
end
defp query(pool, query, log, opts, timeout) do
GenServer.call(pool, {:query, query, log, opts}, timeout)
end
defp cancel({:checkout, _, ref, _, mon}, ref) do
Process.demonitor(mon, [:flush])
false
end
defp cancel(_, _) do
true
end
defp down({:checkout, _, _, _, mon}, mon) do
false
end
defp down(_, _) do
true
end
defp demonitor(%{monitor: mon} = s) do
Process.demonitor(mon, [:flush])
%{s | fun: nil, ref: nil, monitor: nil}
end
defp dequeue(%{queue: q} = s) do
case :queue.out(q) do
{{:value, {:checkout, from, ref, fun, mon}}, q} ->
%{module: module, conn: conn} = s
GenServer.reply(from, {:ok, {module, conn}})
%{s | ref: ref, fun: fun, monitor: mon, queue: q}
{{:value, {query, log, opts, from}}, q} ->
{reply, s} = handle_query(query, log, opts, %{s | queue: q})
GenServer.reply(from, reply)
dequeue(s)
{:empty, _} ->
s
end
end
def handle_query(query, log, opts, s) do
query! = &query!(&1, &2, log, opts)
case query do
:begin -> begin(s, query!)
:restart -> restart(s, query!)
:rollback -> rollback(s, query!)
end
end
defp begin(%{ref: nil, mode: :sandbox} = s, _) do
{{:error, :sandbox}, s}
end
defp begin(%{ref: nil, mode: :raw, module: module} = s, query!) do
begin_sql = module.begin_transaction()
query!.(s, begin_sql)
savepoint_sql = module.savepoint("ecto_sandbox")
query!.(s, savepoint_sql)
{:ok, %{s | mode: :sandbox}}
end
defp restart(%{ref: nil, mode: :raw} = s, query!), do: begin(s, query!)
defp restart(%{ref: nil, mode: :sandbox, module: module} = s, query!) do
sql = module.rollback_to_savepoint("ecto_sandbox")
query!.(s, sql)
{:ok, s}
end
defp rollback(%{ref: nil, mode: :raw} = s, _), do: {:ok, s}
defp rollback(%{ref: nil, mode: :sandbox, module: module} = s, query!) do
sql = module.rollback()
query!.(s, sql)
{:ok, %{s | mode: :raw}}
end
defp query!(%{module: module, conn: conn}, sql, log, opts) do
log? = Keyword.get(opts, :log, true)
{query_time, res} = :timer.tc(module, :query, [conn, sql, [], opts])
if log? do
entry = %Ecto.LogEntry{query: sql, params: [], result: res,
query_time: query_time, queue_time: nil}
log.(entry)
end
case res do
{:ok, _} ->
:ok
{:error, err} ->
raise err
end
end
defp reset(s) do
%{module: module, conn: conn, params: params, shutdown: shutdown} = s
Connection.shutdown(conn, shutdown)
case Connection.connect(module, params) do
{:ok, conn} -> %{s | conn: conn}
{:error, error} -> raise error
end
end
end | deps/ecto/lib/ecto/adapters/sql/sandbox.ex | 0.770853 | 0.418162 | sandbox.ex | starcoder |
defmodule Bonny.CRD do
@moduledoc """
Represents the `spec` portion of a Kubernetes [CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/) manifest.
> The CustomResourceDefinition API resource allows you to define custom resources. Defining a CRD object creates a new custom resource with a name and schema that you specify. The Kubernetes API serves and handles the storage of your custom resource.
"""
alias Bonny.CRD
@api_version "apiextensions.k8s.io/v1beta1"
@kind "CustomResourceDefinition"
@typep names_t :: %{
kind: String.t(),
singular: String.t(),
plural: String.t(),
shortNames: nil | list(String.t()),
version: String.t()
}
@typep columns_t :: %{
name: String.t(),
type: String.t(),
description: String.t(),
JSONPath: String.t()
}
@typedoc "CRD Spec"
@type t :: %__MODULE__{
scope: :namespaced | :cluster,
group: String.t(),
names: names_t,
version: String.t(),
additional_printer_columns: list(columns_t)
}
@enforce_keys [:scope, :group, :names]
@derive Jason.Encoder
defstruct additional_printer_columns: nil,
group: nil,
names: nil,
scope: :namespaced,
version: nil
@doc """
CRD Kind or plural name
## Examples
iex> Bonny.CRD.kind(%Bonny.CRD{names: %{plural: "greetings"}, scope: :namespaced, group: "test", version: "v1"})
"greetings"
"""
@spec kind(Bonny.CRD.t()) :: binary
def kind(%Bonny.CRD{names: %{plural: plural}}), do: plural
@doc """
Gets group version from CRD spec
## Examples
iex> Bonny.CRD.api_version(%Bonny.CRD{group: "hello.example.com", version: "v1", scope: :namespaced, names: %{}})
"hello.example.com/v1"
"""
@spec api_version(Bonny.CRD.t()) :: String.t()
def api_version(%Bonny.CRD{group: g, version: v}), do: "#{g}/#{v}"
@doc """
Generates the map equivalent of the Kubernetes CRD YAML manifest
```yaml
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: widgets.example.com
spec:
group: example.com
names:
kind: Widget
plural: widgets
scope: Namespaced
version: v1
```
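
  A sketch of generating that manifest from a CRD spec (field values are
  illustrative):

      crd = %Bonny.CRD{
        group: "example.com",
        version: "v1",
        scope: :namespaced,
        names: %{plural: "widgets", singular: "widget", kind: "Widget"}
      }

      Bonny.CRD.to_manifest(crd)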
"""
@spec to_manifest(Bonny.CRD.t()) :: map
def to_manifest(%CRD{} = crd) do
%{
apiVersion: @api_version,
kind: @kind,
metadata: %{
name: "#{crd.names.plural}.#{crd.group}",
labels: Bonny.Operator.labels()
},
spec: format_spec(crd)
}
end
@doc """
Default CLI printer columns.
These are added to the CRDs columns _when_ columns are set.
The kubernetes API returns these by default when they _are not_ set.
"""
@spec default_columns() :: list(map())
def default_columns() do
[
%{
name: "Age",
type: "date",
description:
"CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
JSONPath: ".metadata.creationTimestamp"
}
]
end
@spec format_spec(Bonny.CRD.t()) :: map
defp format_spec(%CRD{scope: scope} = crd) do
cased_scope = String.capitalize("#{scope}")
crd
|> Map.from_struct()
|> Map.put(:scope, cased_scope)
|> rename_keys(keys_to_rename())
end
@spec rename_keys(map, map) :: map
defp rename_keys(map, keymap) do
Enum.reduce(keymap, map, fn {oldkey, newkey}, agg ->
value = Map.get(agg, oldkey)
agg
|> Map.drop([oldkey])
|> Map.put(newkey, value)
end)
end
@spec keys_to_rename() :: map
defp keys_to_rename() do
%{
additional_printer_columns: :additionalPrinterColumns
}
end
end | lib/bonny/crd.ex | 0.888813 | 0.811452 | crd.ex | starcoder |
defmodule Supervisor.Spec do
@moduledoc """
Outdated functions for building child specifications.
The functions in this module are deprecated and they do not work
with the module-based child specs introduced in Elixir v1.5.
Please see the `Supervisor` documentation instead.
Convenience functions for defining supervisor specifications.
## Example
By using the functions in this module one can specify the children
to be used under a supervisor, started with `Supervisor.start_link/2`:
import Supervisor.Spec
children = [
worker(MyWorker, [arg1, arg2, arg3]),
supervisor(MySupervisor, [arg1])
]
Supervisor.start_link(children, strategy: :one_for_one)
Sometimes, it may be handy to define supervisors backed
by a module:
defmodule MySupervisor do
use Supervisor
def start_link(arg) do
Supervisor.start_link(__MODULE__, arg)
end
def init(arg) do
children = [
worker(MyWorker, [arg], restart: :temporary)
]
supervise(children, strategy: :simple_one_for_one)
end
end
Notice in this case we don't have to explicitly import
`Supervisor.Spec` as `use Supervisor` automatically does so.
Defining a module-based supervisor can be useful, for example,
to perform initialization tasks in the `c:init/1` callback.
## Supervisor and worker options
In the example above, we defined specs for workers and supervisors.
These specs (both for workers as well as supervisors) accept the
following options:
* `:id` - a name used to identify the child specification
internally by the supervisor; defaults to the given module
name for the child worker/supervisor
* `:function` - the function to invoke on the child to start it
* `:restart` - an atom that defines when a terminated child process should
be restarted (see the "Restart values" section below)
    * `:shutdown` - a value that defines how a child process should be
      terminated (see the "Shutdown values" section below)
* `:modules` - it should be a list with one element `[module]`,
where module is the name of the callback module only if the
child process is a `Supervisor` or `GenServer`; if the child
process is a `GenEvent`, `:modules` should be `:dynamic`
### Restart values (:restart)
The following restart values are supported in the `:restart` option:
* `:permanent` - the child process is always restarted
* `:temporary` - the child process is never restarted (not even
when the supervisor's strategy is `:rest_for_one` or `:one_for_all`)
* `:transient` - the child process is restarted only if it
terminates abnormally, i.e., with an exit reason other than
`:normal`, `:shutdown` or `{:shutdown, term}`
  Notice that a supervisor that reaches its maximum restart intensity will exit with reason `:shutdown`.
  In this case the supervisor will only be restarted if its child specification was defined with
  the `:restart` option set to `:permanent` (the default).
### Shutdown values (`:shutdown`)
The following shutdown values are supported in the `:shutdown` option:
* `:brutal_kill` - the child process is unconditionally terminated
using `Process.exit(child, :kill)`
* `:infinity` - if the child process is a supervisor, this is a mechanism
to give the subtree enough time to shut down; it can also be used with
workers with care
* a non-negative integer - the amount of time in milliseconds
that the supervisor tells the child process to terminate by calling
`Process.exit(child, :shutdown)` and then waits for an exit signal back.
If no exit signal is received within the specified time,
the child process is unconditionally terminated
using `Process.exit(child, :kill)`
"""
@moduledoc deprecated:
"Use the new child specifications outlined in the Supervisor module instead"
@typedoc "Supported strategies"
@type strategy :: :simple_one_for_one | :one_for_one | :one_for_all | :rest_for_one
@typedoc "Supported restart values"
@type restart :: :permanent | :transient | :temporary
@typedoc "Supported shutdown values"
@type shutdown :: timeout | :brutal_kill
@typedoc "Supported worker values"
@type worker :: :worker | :supervisor
@typedoc "Supported module values"
@type modules :: :dynamic | [module]
@typedoc "Supported ID values"
@type child_id :: term
@typedoc "The supervisor specification"
@type spec ::
{child_id, start_fun :: {module, atom, [term]}, restart, shutdown, worker, modules}
@doc """
Receives a list of `children` (workers or supervisors) to
supervise and a set of `options`.
Returns a tuple containing the supervisor specification. This tuple can be
used as the return value of the `c:init/1` callback when implementing a
module-based supervisor.
## Examples
supervise(children, strategy: :one_for_one)
## Options
* `:strategy` - the restart strategy option. It can be either
`:one_for_one`, `:rest_for_one`, `:one_for_all`, or
`:simple_one_for_one`. You can learn more about strategies
in the `Supervisor` module docs.
* `:max_restarts` - the maximum number of restarts allowed in
a time frame. Defaults to `3`.
* `:max_seconds` - the time frame in which `:max_restarts` applies.
Defaults to `5`.
The `:strategy` option is required and by default a maximum of 3 restarts is
allowed within 5 seconds. Check the `Supervisor` module for a detailed
description of the available strategies.
"""
@spec supervise(
[spec],
strategy: strategy,
max_restarts: non_neg_integer,
max_seconds: pos_integer
) :: {:ok, tuple}
@deprecated "Use the new child specifications outlined in the Supervisor module instead"
def supervise(children, options) do
unless strategy = options[:strategy] do
raise ArgumentError, "expected :strategy option to be given"
end
maxR = Keyword.get(options, :max_restarts, 3)
maxS = Keyword.get(options, :max_seconds, 5)
assert_unique_ids(Enum.map(children, &get_id/1))
{:ok, {{strategy, maxR, maxS}, children}}
end
defp get_id({id, _, _, _, _, _}) do
id
end
defp get_id(other) do
raise ArgumentError,
"invalid tuple specification given to supervise/2. If you are trying to use " <>
"the map child specification that is part of the Elixir v1.5, use Supervisor.init/2 " <>
"instead of Supervisor.Spec.supervise/2. See the Supervisor module for more information. " <>
"Got: #{inspect(other)}"
end
defp assert_unique_ids([id | rest]) do
if id in rest do
raise ArgumentError,
"duplicated ID #{inspect(id)} found in the supervisor specification, " <>
"please explicitly pass the :id option when defining this worker/supervisor"
else
assert_unique_ids(rest)
end
end
defp assert_unique_ids([]) do
:ok
end
@doc """
Defines the given `module` as a worker which will be started
with the given arguments.
worker(ExUnit.Runner, [], restart: :permanent)
By default, the function `start_link` is invoked on the given
module. Overall, the default values for the options are:
[
id: module,
function: :start_link,
restart: :permanent,
shutdown: 5000,
modules: [module]
]
See the "Supervisor and worker options" section in the `Supervisor.Spec` module for more
information on the available options.
"""
@spec worker(
module,
[term],
restart: restart,
shutdown: shutdown,
id: term,
function: atom,
modules: modules
) :: spec
@deprecated "Use the new child specifications outlined in the Supervisor module instead"
def worker(module, args, options \\ []) do
child(:worker, module, args, options)
end
@doc """
Defines the given `module` as a supervisor which will be started
with the given arguments.
supervisor(module, [], restart: :permanent)
By default, the function `start_link` is invoked on the given
module. Overall, the default values for the options are:
[
id: module,
function: :start_link,
restart: :permanent,
shutdown: :infinity,
modules: [module]
]
See the "Supervisor and worker options" section in the `Supervisor.Spec` module for more
information on the available options.
"""
@spec supervisor(
module,
[term],
restart: restart,
shutdown: shutdown,
id: term,
function: atom,
modules: modules
) :: spec
@deprecated "Use the new child specifications outlined in the Supervisor module instead"
def supervisor(module, args, options \\ []) do
options = Keyword.put_new(options, :shutdown, :infinity)
child(:supervisor, module, args, options)
end
defp child(type, module, args, options) do
id = Keyword.get(options, :id, module)
modules = Keyword.get(options, :modules, modules(module))
function = Keyword.get(options, :function, :start_link)
restart = Keyword.get(options, :restart, :permanent)
shutdown = Keyword.get(options, :shutdown, 5000)
{id, {module, function, args}, restart, shutdown, type, modules}
end
defp modules(GenEvent), do: :dynamic
defp modules(module), do: [module]
end | lib/elixir/lib/supervisor/spec.ex | 0.85443 | 0.676807 | spec.ex | starcoder |
defmodule Snowflex.Connection do
@moduledoc """
Defines a Snowflake connection.
## Definition
When used, the connection expects the `:otp_app` option. You may also define a standard timeout. This will default to 60 seconds.
If `keep_alive?` is set to `true`, each worker in the connection pool will
periodically send a dummy query to Snowflake to keep the authenticated
session from expiring.
```
defmodule SnowflakeConnection do
use Snowflex.Connection,
otp_app: :my_app,
timeout: :timer.seconds(60),
keep_alive?: true
end
```
Configuration should be extended in your config files.
```
# config/prod.exs
config :my_app, SnowflakeConnection,
size: [
max: 10,
min: 5
],
connection: [
server: "snowflex.us-east-8.snowflakecomputing.com",
role: "DEV",
warehouse: "CUSTOMER_DEV_WH"
]
```
  The connection will default to using the `Snowflex.Worker` module. You can define a different one for testing/development purposes in your configuration as well.
```
# config/dev.exs
config :my_app, SnowflakeConnection,
size: [
max: 1,
min: 1
],
worker: MyApp.MockWorker
```
## Usage
Ensure the connection is started as part of your application.
```
defmodule MyApp.Application do
def start(_, _) do
...
children = [
...,
SnowflakeConnection
]
end
end
```
`execute/1`
```
query = "SELECT * FROM foo"
SnowflakeConnection.execute(query)
```
`execute/2`
```
query = \"""
SELECT * FROM foo
WHERE bar = ?
\"""
SnowflakeConnection.execute(query, [Snowflex.string_param("baz")])
```
"""
@doc false
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@behaviour Snowflex.Connection
# setup compile time config
otp_app = Keyword.fetch!(opts, :otp_app)
timeout = Keyword.get(opts, :timeout, :timer.seconds(60))
map_nulls_to_nil? = Keyword.get(opts, :map_nulls_to_nil?, false)
keep_alive? = Keyword.get(opts, :keep_alive?, false)
@otp_app otp_app
@name __MODULE__
@default_size [
max: 10,
min: 5
]
@keep_alive? keep_alive?
@heartbeat_interval :timer.hours(3)
@query_opts [
timeout: timeout,
map_nulls_to_nil?: map_nulls_to_nil?
]
def child_spec(_) do
config = Application.get_env(@otp_app, __MODULE__, [])
connection = Keyword.get(config, :connection, [])
worker_module = Keyword.get(config, :worker, Snowflex.Worker)
user_size_config = Keyword.get(config, :size, [])
final_size_config = Keyword.merge(@default_size, user_size_config)
min_pool_size = Keyword.get(final_size_config, :min)
max_pool_size = Keyword.get(final_size_config, :max)
opts = [
{:name, {:local, @name}},
{:worker_module, worker_module},
{:size, max_pool_size},
{:max_overflow, min_pool_size}
]
:poolboy.child_spec(@name, opts,
connection_args: connection,
keep_alive?: @keep_alive?,
heartbeat_interval: @heartbeat_interval
)
end
@impl Snowflex.Connection
def execute(query) when is_binary(query) do
Snowflex.sql_query(@name, query, @query_opts)
end
@impl Snowflex.Connection
def execute(query, params) when is_binary(query) and is_list(params) do
Snowflex.param_query(@name, query, params, @query_opts)
end
end
end
## Callbacks
@doc """
Wraps `Snowflex.sql_query/3` and injects the relevant information from the connection
"""
@callback execute(query :: String.t()) ::
Snowflex.sql_data() | {:error, any} | {:updated, integer()}
@doc """
Wraps `Snowflex.param_query/4` and injects the relevant information from the connection
"""
@callback execute(query :: String.t(), params :: list(Snowflex.query_param())) ::
Snowflex.sql_data() | {:error, any} | {:updated, integer()}
end | lib/snowflex/connection.ex | 0.852614 | 0.775477 | connection.ex | starcoder |
defmodule LiveAttribute do
use GenServer
defstruct [:refresher, :subscribe, :target, :filter]
@moduledoc """
LiveAttribute makes binding updateable values easier. To use it add it to your LiveView using `use LiveAttribute`
and then use the function `assign_attribute(socket, subscribe_callback, property_callbacks)` to register attributes.
The attributes will listen to all incoming events and update their assigns of your LiveView automatically, saving
you the hassle of implementing independent `handle_info()` and `update_...()` calls.
## Example using LiveAttribute
```
defmodule UserLive do
use Phoenix.LiveView
use LiveAttribute
def mount(_params, _session, socket) do
{:ok, assign_attribute(socket, &Accounts.subscribe/0, users: &Accounts.list_users/0)}
end
def handle_event("delete_user", %{"id" => user_id}, socket) do
Accounts.get_user!(user_id)
|> Accounts.delete_user()
{:noreply, socket}
end
end
```
## Same Example without LiveAttribute
```
defmodule UserLive do
use Phoenix.LiveView
def mount(_params, _session, socket) do
if connected?(socket), do: Accounts.subscribe()
{:ok, update_users(socket)}
end
defp update_users(socket) do
users = Accounts.list_users()
assign(socket, users: users)
end
def handle_event("delete_user", %{"id" => user_id}, socket) do
Accounts.get_user!(user_id)
|> Accounts.delete_user()
{:noreply, socket}
end
def handle_info({Accounts, [:user, _], _}, socket) do
{:noreply, update_users(socket)}
end
end
```
"""
@type socket :: map()
@typedoc """
The refresher list or function.
Should preferably be a list of `{key, callback}` pairs to load the new attribute values.
The `callback` thereby can have optionally one argument to read context from the socket.
Alternatively the refresher can be a single argument function instead of a list. In this
case the function is applied to the socket and thus the user has to ensure that
needed `assign()` calls are made manually.
## Examples
```
iex> assign_attribute(socket, &User.subscribe(), users: &User.list_all/0)
iex> assign_attribute(socket, &User.subscribe(),
fn socket -> User.list_all() -- socket.assigns.blacklist end
)
iex> assign_attribute(socket, &User.subscribe(), fn socket ->
assign(users: User.list_all() -- socket.assigns.blacklist)
end)
```
"""
@type refresher :: [{atom(), (() -> any()) | (socket() -> any())}] | (socket() -> socket())
defmacro __using__(_opts) do
quote do
import LiveAttribute, only: [update_attribute: 2]
def handle_info({LiveAttribute, refresher}, socket) do
{:noreply, refresher.(socket)}
end
def assign_attribute(socket, {subscribe, refresher}),
do: assign_attribute(socket, subscribe, :_, refresher)
def assign_attribute(socket, {subscribe, filter, refresher}),
do: assign_attribute(socket, subscribe, filter, refresher)
@spec assign_attribute(
LiveAttribute.socket(),
(() -> any()),
any(),
LiveAttribute.refresher()
) :: LiveAttribute.socket()
def assign_attribute(socket, subscribe, filter \\ :_, refresher)
def assign_attribute(socket, subscribe, filter, refresher) when is_list(refresher) do
refresher = fn socket ->
Enum.reduce(refresher, socket, fn {key, value}, socket ->
assign(socket, [{key, LiveAttribute.apply(socket, value)}])
end)
end
assign_attribute(socket, subscribe, filter, refresher)
end
def assign_attribute(socket, subscribe, filter, refresher) when is_function(refresher, 1) do
if connected?(socket) do
LiveAttribute.new(subscribe, filter, refresher)
end
refresher.(socket)
end
end
end
@doc """
Assigns a new attribute to the given socket.
* `socket` the LiveView socket where the assigns should be executed on
* `subscribe` the subscribe callback to start the subscription e.g. `&Users.subscribe/0`
* `filter` an optional filter if you don't want to update on each event. The filter can either be an expression
using `:_` as wildcard parameter such as `{Accounts, [:user, :_], :_}`. Alternatively `filter`
can be a function with one parameter
_Note_ LiveAttribute is issuing each subscribe call in an isolated helper process, so you only need
to add filters to reduce the scope of a single subscription.
* `refresher` the function callback to load the new values after a subscription event has
fired.
## Example
```
iex> socket = assign_attribute(socket, &Users.subscribe/0, users: &Users.list_all/0)
```
"""
def assign_attribute(_socket, _subscribe, _filter \\ :_, _refresher), do: :docs
@doc """
Allows force updating an attribute. This is useful for example when the update chain of the attribute
depends on another socket.assign that is not subscribed to.
## Example
```
iex> socket = update_attribute(socket, :users)
```
"""
def update_attribute(socket, name) do
refresher =
:global.whereis_name({LiveAttribute, self(), name})
|> GenServer.call(:get_refresher)
refresher.(socket)
end
@doc false
def new(subscribe, filter, refresher) do
la = %LiveAttribute{
filter: filter,
refresher: refresher,
subscribe: subscribe,
target: self()
}
GenServer.start_link(__MODULE__, la, hibernate_after: 5_000)
end
@impl true
@doc false
def init(%LiveAttribute{target: target, subscribe: subscribe, refresher: refresher} = la) do
Process.monitor(target)
subscribe.()
if is_list(refresher) do
Enum.each(refresher, fn {key, _} ->
:global.register_name({__MODULE__, target, key}, self())
end)
end
{:ok, la}
end
@impl true
@doc false
def handle_info({:DOWN, _ref, :process, _pid}, state) do
{:stop, :normal, state}
end
@impl true
@doc false
def handle_info(
any,
%LiveAttribute{target: target, refresher: refresher, filter: filter} = state
) do
if matches?(filter, any) do
send(target, {LiveAttribute, refresher})
end
{:noreply, state}
end
@impl true
def handle_call(:get_refresher, _from, %LiveAttribute{refresher: refresher} = state) do
{:reply, refresher, state}
end
@doc false
def matches?(:_, _any), do: true
def matches?(fun, any) when is_function(fun, 1), do: fun.(any)
def matches?(same, same), do: true
def matches?(tuple1, tuple2) when is_tuple(tuple1) and is_tuple(tuple2),
do: matches?(Tuple.to_list(tuple1), Tuple.to_list(tuple2))
def matches?([head1 | rest1], [head2 | rest2]),
do: matches?(head1, head2) and matches?(rest1, rest2)
def matches?(_, _), do: false
@doc false
def apply(_socket, refresher) when is_function(refresher, 0), do: refresher.()
def apply(socket, refresher) when is_function(refresher, 1), do: refresher.(socket)
@doc false
def child_spec(init_arg) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [init_arg]}
}
|> Supervisor.child_spec([])
end
end | lib/live_attribute.ex | 0.921983 | 0.807157 | live_attribute.ex | starcoder |
defmodule Ritcoinex.Initial do
@doc """
  burn_nodes/3 is a function used to build a node wallet table.
  Parameters:
  - the 1st argument is the atom used as the name of the wallet table,
  - the 2nd argument is the initial wallet name, which Hash.hash_to_wallets
  hashes to produce the hashed name of the wallet,
  - the 3rd argument is the fee the node receives for making a transaction.
  Example:
  iex> burn_nodes(:luna, "i'm cat", 0.25)
  {:atomic, [{:luna, "hash of: i'm cat", 0, 0.00, 0.25, []}]}
  In the returned record, the 3rd element is idblock, the number of the
  transaction in the table; the 4th is the wallet amount, which is obviously
  0.00; the 5th is the 3rd argument given to burn_nodes/3; and the 6th is an
  empty block that stores the single transaction of the wallet, because the
  next transaction generates a new index and a new name, so records can't
  see all of the transactions.
"""
def burn_nodes(name_table, wallet, tx_fee) do
hnwallet = Hash.hash_to_wallets(wallet)
all_nodes = Node.list()
Ritcoinex.Initial.Records.create_table_node(name_table)
Ritcoinex.Initial.Register.register_node_initial(name_table, hnwallet, tx_fee)
    Enum.each(all_nodes, &:mnesia.add_table_copy(name_table, &1, :disc_copies))
end
@doc """
  burn_users/2 is a function used to build a user wallet table.
  Parameters:
  - the 1st argument is the atom used as the name of the wallet table,
  - the 2nd argument is the initial wallet name, which Hash.hash_to_wallets
  hashes to produce the hashed name of the wallet.
  Example:
  iex> burn_users(:fraga, "brazillian superstar ancap")
  {:atomic, [{:fraga, "hash of: brazillian superstar ancap", 0, 0.00, []}]}
  Unlike the previous function, this one leaves out the transaction fee,
  so users can't be paid fees for transactions the way nodes are.
"""
def burn_users(name_table, wallet) do
huwallet = Hash.hash_to_wallets(wallet)
all_nodes = Node.list()
Ritcoinex.Initial.Records.create_table_user(name_table)
Ritcoinex.Initial.Register.register_user_initial(name_table, huwallet)
    Enum.each(all_nodes, &:mnesia.add_table_copy(name_table, &1, :disc_copies))
end
end | lib/mnesia_chain/mnesia_functions/burn_chainself.ex | 0.78964 | 0.604487 | burn_chainself.ex | starcoder |
defmodule Nostrum.Cache.GuildCache do
@default_cache_implementation Nostrum.Cache.GuildCache.ETS
@moduledoc """
Cache behaviour & dispatcher for guilds.
You can call the functions provided by this module independent of which cache
is configured, and it will dispatch to the configured cache implementation.
The user-facing functions for reading the cache can be found in the "Reading
the cache" section.
By default, #{@default_cache_implementation} will be used for caching guilds.
You can override this in the `:caches` option of the `:nostrum` application
by setting the `:guilds` field to a different module implementing the
`Nostrum.Cache.GuildCache` behaviour. Any module below
`Nostrum.Cache.GuildCache` can be used as a cache.
## Writing your own guild cache
As with the other caches, the guild cache API consists of two parts:
- The functions that the user calls, such as `c:all/0` or `c:select_by/2`.
- The functions that nostrum calls, such as `c:create/1` or `c:update/1`.
These **do not create any objects in the Discord API**, they are purely
created to update the cached data from data that Discord sends us. If you
want to create objects on Discord, use the functions exposed by `Nostrum.Api`
instead.
You need to implement both of them for nostrum to work with your custom
cache. **You also need to implement `Supervisor` callbacks**, which will
start your cache as a child under `Nostrum.Cache.CacheSupervisor`: As an
  example, the `Nostrum.Cache.GuildCache.ETS` implementation uses this to
  set up the ETS table it uses for caching. See the callbacks section for every
nostrum-related callback you need to implement.
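
  A skeleton of a custom cache module (names are illustrative; only two of
  the required callbacks are shown):

  ```Elixir
  defmodule MyBot.GuildCache do
    @behaviour Nostrum.Cache.GuildCache
    use Supervisor

    def start_link(init_arg) do
      Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
    end

    @impl Supervisor
    def init(_init_arg) do
      # set up your backing store here, e.g. an ETS table
      Supervisor.init([], strategy: :one_for_one)
    end

    @impl Nostrum.Cache.GuildCache
    def get(_guild_id) do
      # look the guild up in your backing store
      {:error, :id_not_found_on_guild_lookup}
    end

    # ... remaining `Nostrum.Cache.GuildCache` callbacks ...
  end
  ```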
The "upstream data" wording in this module references the fact that the
data that the guild cache (and other caches) retrieves represents the raw
data we receive from the upstream connection, no attempt is made by nostrum
to sanitize the data before it enters the cache. Caching implementations
need to cast the data to the resulting type themselves. A possible future
improvement would be moving the data casting into this module before the
backing cache implementation is called.
"""
alias Nostrum.Struct.Channel
alias Nostrum.Struct.Emoji
alias Nostrum.Struct.Guild
alias Nostrum.Struct.Guild.Member
alias Nostrum.Struct.Guild.Role
alias Nostrum.Struct.Message
@configured_cache :nostrum
|> Application.compile_env(:caches, %{})
|> Map.get(:guilds, @default_cache_implementation)
@typedoc "Specifies the reason for why a lookup operation has failed."
@type reason ::
:id_not_found
| :id_not_found_on_guild_lookup
@typedoc "A selector for looking up entries in the cache."
@type selector :: (Guild.t() -> any)
@typedoc "A clause for filtering guilds."
@type clause ::
{:id, Guild.id()}
| {:channel_id, Channel.id()}
| {:message, Message.t()}
@typedoc "A collection of `t:clause/0`s for filtering guilds."
@type clauses :: [clause] | map
## Supervisor callbacks
# These set up the backing cache.
@doc false
defdelegate init(init_arg), to: @configured_cache
@doc false
defdelegate start_link(init_arg), to: @configured_cache
@doc false
defdelegate child_spec(opts), to: @configured_cache
## Behaviour specification
@doc """
Retrieves all `Nostrum.Struct.Guild` from the cache.
"""
@doc section: :reading
@callback all() :: Enum.t()
@doc """
Selects guilds matching `selector` from all `Nostrum.Struct.Guild` in the cache.
"""
@doc section: :reading
@callback select_all(selector :: (Guild.t() -> any())) :: Enum.t()
@doc """
  Retrieves a single `Nostrum.Struct.Guild` from the cache via its `id`.
Returns `{:error, reason}` if no result was found.
## Examples
```Elixir
iex> Nostrum.Cache.GuildCache.get(0)
{:ok, %Nostrum.Struct.Guild{id: 0}}
iex> Nostrum.Cache.GuildCache.get(10)
{:error, :id_not_found_on_guild_lookup}
```
"""
@doc section: :reading
@callback get(Guild.id()) :: {:ok, Guild.t()} | {:error, reason}
@doc """
Same as `get/1`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@doc section: :reading
@callback get!(Guild.id()) :: Guild.t() | no_return
@doc """
  Retrieves a single `Nostrum.Struct.Guild` where it matches the `clauses`.
Returns `{:error, reason}` if no result was found.
```Elixir
iex> Nostrum.Cache.GuildCache.get_by(id: 0)
{:ok, %Nostrum.Struct.Guild{id: 0}}
iex> Nostrum.Cache.GuildCache.get_by(%{id: 0})
{:ok, %Nostrum.Struct.Guild{id: 0}}
iex> Nostrum.Cache.GuildCache.get_by(id: 10)
{:error, :id_not_found_on_guild_lookup}
```
"""
@doc section: :reading
@callback get_by(clauses) :: {:ok, Guild.t()} | {:error, reason()}
@doc """
Same as `get_by/1`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@doc section: :reading
@callback get_by!(clauses) :: Guild.t() | no_return
@doc """
Selects values using a `selector` from a `Nostrum.Struct.Guild`.
Returns `{:error, reason}` if no result was found.
## Examples
```Elixir
iex> Nostrum.Cache.GuildCache.select(0, fn guild -> guild.id end)
{:ok, 0}
iex> Nostrum.Cache.GuildCache.select(10, fn guild -> guild.id end)
{:error, :id_not_found_on_guild_lookup}
```
"""
@doc section: :reading
@callback select(Guild.id(), selector) :: {:ok, any} | {:error, reason}
@doc """
Same as `select/2`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@doc section: :reading
@callback select!(Guild.id(), selector) :: any | no_return
@doc """
Selects values using a `selector` from a `Nostrum.Struct.Guild` that matches
the `clauses`.
Returns `{:error, reason}` if no result was found.
```Elixir
iex> Nostrum.Cache.GuildCache.select_by([id: 0], fn guild -> guild.id end)
{:ok, 0}
iex> Nostrum.Cache.GuildCache.select_by(%{id: 0}, fn guild -> guild.id end)
{:ok, 0}
iex> Nostrum.Cache.GuildCache.select_by([id: 10], fn guild -> guild.id end)
{:error, :id_not_found_on_guild_lookup}
```
"""
@doc section: :reading
@callback select_by(clauses, selector) :: {:ok, any} | {:error, reason}
@doc """
Same as `select_by/2`, but raises `Nostrum.Error.CacheError` in case of failure.
"""
@doc section: :reading
@callback select_by!(clauses, selector) :: any | no_return
# Functions called from nostrum.
@doc "Create a guild in the cache."
@callback create(Guild.t()) :: true
@doc """
Update a guild from upstream data.
Return the original guild before the update, and the updated guild.
"""
@callback update(map()) :: {old_guild :: Guild.t(), updated_guild :: Guild.t()}
@doc """
Delete a guild from the cache.
Return the old guild if it was cached, or `nil` otherwise.
"""
@callback delete(Guild.id()) :: Guild.t() | nil
@doc """
Create a channel for the guild from upstream data.
Return the adapted `t:Nostrum.Struct.Channel.t/0` structure.
"""
@callback channel_create(Guild.id(), channel :: map()) :: Channel.t()
@doc """
Delete the given channel from the guild.
If the channel was cached, return the original channel. Return `:noop`
otherwise.
"""
@callback channel_delete(Guild.id(), Channel.id()) :: Channel.t() | :noop
@doc """
Update the given channel on the given guild from upstream data.
Return the original channel before the update, and the updated channel.
"""
@callback channel_update(Guild.id(), channel :: map()) ::
{old_channel :: Channel.t(), new_channel :: Channel.t()}
@doc """
Update the emoji list of the given guild from upstream data.
Discord sends us the complete emoji list on an update, which is passed here.
Return the old list of emojis before the update, and the updated list of
emojis.
"""
@callback emoji_update(Guild.id(), emojis :: [map()]) ::
{old_emojis :: [Emoji.t()], new_emojis :: [Emoji.t()]}
@doc """
Add the member for the given guild from upstream data.
Return the casted member structure.
"""
@callback member_add(Guild.id(), member :: map()) :: Member.t()
@doc """
Remove the given member for the given guild from upstream data.
Return the guild ID and old member if the member was cached. Otherwise,
return `:noop`.
"""
@callback member_remove(Guild.id(), member :: map()) ::
{Guild.id(), old_member :: Member.t()} | :noop
@doc """
Update the given member for the given guild from upstream data.
Return the guild ID that was updated, the old cached member (if the member
was known to the cache), and the updated member.
## Note regarding intents
Even if the required intents to receive `GUILD_MEMBER_UPDATE`
events are disabled to a point where we do not receive guild creation events,
it is still possible to receive the event for our own user. An example of
this can be found in [issue
#293](https://github.com/Kraigie/nostrum/issues/293). Note that the linked
issue refers to the old contents of this module before the ETS-based guild
cache was moved into `#{__MODULE__}.ETS`.
"""
@callback member_update(Guild.id(), member :: map()) ::
{Guild.id(), old_member :: Member.t() | nil, updated_member :: Member.t()}
@doc """
Bulk create multiple members in the cache from upstream data.
Return value is unused, as we currently do not dispatch a gateway for this.
"""
@callback member_chunk(Guild.id(), chunk :: [member :: map()]) :: true
@doc """
Create a role on the given guild from upstream data.
Return the casted role.
"""
@callback role_create(Guild.id(), role :: map()) :: Role.t()
@doc """
Delete the given role on the given guild.
Return the guild and the old role if it was cached, or `:noop` otherwise.
"""
@callback role_delete(Guild.id(), Role.id()) :: {Guild.id(), old_role :: Role.t()} | :noop
@doc """
Update a role on the given guild from upstream data.
Return the old role before the update and the updated role.
"""
@callback role_update(Guild.id(), role :: map()) :: {old_role :: Role.t(), new_role :: Role.t()}
@doc """
Update the voice state of the given guild from upstream data.
Note that it is recommended to drop the `:member` / `"member"` keys of
the supplied upstream data, as these would otherwise duplicate the data
that is being kept in the guild cache already.
Return the guild ID and the updated voice states of the guild.
"""
@callback voice_state_update(Guild.id(), state :: map()) :: {Guild.id(), new_state :: [map()]}
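  # Example: a minimal sketch of a custom adapter implementing this
  # behaviour (the module name is hypothetical, and a real adapter must
  # implement every callback above):
  #
  #     defmodule MyBot.NoopGuildCache do
  #       @behaviour Nostrum.Cache.GuildCache
  #
  #       @impl true
  #       def create(%Nostrum.Struct.Guild{}), do: true
  #
  #       # ... remaining callbacks elided ...
  #     end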
# Dispatching logic.
defdelegate all, to: @configured_cache
defdelegate select_all(selector), to: @configured_cache
defdelegate get(guild_id), to: @configured_cache
defdelegate get!(guild_id), to: @configured_cache
defdelegate get_by(clauses), to: @configured_cache
defdelegate get_by!(clauses), to: @configured_cache
defdelegate select(guild_id, selector), to: @configured_cache
defdelegate select!(guild_id, selector), to: @configured_cache
defdelegate select_by(clauses, selector), to: @configured_cache
defdelegate select_by!(clauses, selector), to: @configured_cache
defdelegate create(guild), to: @configured_cache
defdelegate update(guild), to: @configured_cache
defdelegate delete(guild_id), to: @configured_cache
defdelegate channel_create(guild_id, channel), to: @configured_cache
defdelegate channel_delete(guild_id, channel_id), to: @configured_cache
defdelegate channel_update(guild_id, channel), to: @configured_cache
defdelegate emoji_update(guild_id, emojis), to: @configured_cache
defdelegate member_add(guild_id, member), to: @configured_cache
defdelegate member_remove(guild_id, member), to: @configured_cache
defdelegate member_update(guild_id, member), to: @configured_cache
defdelegate member_chunk(guild_id, chunk), to: @configured_cache
defdelegate role_create(guild_id, role), to: @configured_cache
defdelegate role_delete(guild_id, role), to: @configured_cache
defdelegate role_update(guild_id, role), to: @configured_cache
defdelegate voice_state_update(guild_id, state), to: @configured_cache
end | lib/nostrum/cache/guild_cache.ex | 0.874185 | 0.772295 | guild_cache.ex | starcoder |
defmodule Ecto.Query.Builder.Join do
@moduledoc false
alias Ecto.Query.Builder
alias Ecto.Query.JoinExpr
@doc """
Escapes a join expression (not including the `on` expression).
It returns a tuple containing the bind variable, the join source expression
(if available), and the association expression.
## Examples
iex> escape(quote(do: x in "foo"), [])
{:x, {"foo", nil}, nil}
iex> escape(quote(do: "foo"), [])
{:_, {"foo", nil}, nil}
iex> escape(quote(do: x in Sample), [])
{:x, {nil, {:__aliases__, [alias: false], [:Sample]}}, nil}
iex> escape(quote(do: c in assoc(p, :comments)), [p: 0])
{:c, nil, {0, :comments}}
"""
@spec escape(Macro.t, Keyword.t) :: {atom, Macro.t | nil, Macro.t | nil}
def escape({:in, _, [{var, _, context}, expr]}, vars)
when is_atom(var) and is_atom(context) do
{_, expr, assoc} = escape(expr, vars)
{var, expr, assoc}
end
def escape({:__aliases__, _, _} = module, _vars) do
{:_, {nil, module}, nil}
end
def escape(string, _vars) when is_binary(string) do
{:_, {string, nil}, nil}
end
def escape({:assoc, _, [{var, _, context}, field]}, vars)
when is_atom(var) and is_atom(context) do
var = Builder.find_var!(var, vars)
field = Builder.quoted_field!(field)
{:_, nil, {var, field}}
end
def escape({:^, _, [expr]}, _vars) do
{:_, quote(do: Ecto.Query.Builder.Join.join!(unquote(expr))), nil}
end
def escape(join, _vars) do
Builder.error! "malformed join `#{Macro.to_string(join)}` in query expression"
end
@doc """
Called at runtime to check dynamic joins.
"""
def join!(expr) when is_atom(expr),
do: {nil, expr}
def join!(expr) when is_binary(expr),
do: {expr, nil}
def join!(expr),
do: Builder.error!("expected join to be a string or atom, got: `#{inspect expr}`")
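  # Example of the interpolated join source handled above (a hypothetical
  # query; `Post` is an illustrative model):
  #
  #     source = "comments"
  #     from p in Post, join: c in ^source, on: c.post_id == p.id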
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(Macro.t, atom, [Macro.t], Macro.t, Macro.t, Macro.t, Macro.Env.t) :: {Macro.t, Keyword.t, non_neg_integer | nil}
def build(query, qual, binding, expr, on, count_bind, env) do
binding = Builder.escape_binding(binding)
{join_bind, join_expr, join_assoc} = escape(expr, binding)
qual = validate_qual(qual)
validate_bind(join_bind, binding)
    # If count_bind is not an integer, make it a variable.
    # The variable is the getter/setter storage.
    {count_bind, count_setter} =
      if join_bind != :_ and !count_bind do
        count_var = quote(do: count_bind)
        {count_var, quote(do: unquote(count_var) = Builder.count_binds(query))}
      else
        {count_bind, nil}
      end
if on && join_assoc do
Builder.error! "cannot specify `on` on `#{qual}_join` when using association join, " <>
"add extra clauses with `where` instead"
end
binding = binding ++ [{join_bind, count_bind}]
join_on = escape_on(on || true, binding, env)
join =
quote do
%JoinExpr{qual: unquote(qual), source: unquote(join_expr),
on: unquote(join_on), assoc: unquote(join_assoc),
file: unquote(env.file), line: unquote(env.line)}
end
    {quoted, count_bind} =
      if is_integer(count_bind) do
        {Builder.apply_query(query, __MODULE__, [join], env), count_bind + 1}
      else
        quoted =
          quote do
            query = Ecto.Queryable.to_query(unquote(query))
            unquote(count_setter)
            %{query | joins: query.joins ++ [unquote(join)]}
          end
        {quoted, quote(do: unquote(count_bind) + 1)}
      end
{quoted, binding, count_bind}
end
def apply(query, expr) do
query = Ecto.Queryable.to_query(query)
%{query | joins: query.joins ++ [expr]}
end
defp escape_on(on, binding, env) do
{on, params} = Builder.escape(on, :boolean, %{}, binding)
params = Builder.escape_params(params)
quote do: %Ecto.Query.QueryExpr{
expr: unquote(on),
params: unquote(params),
line: unquote(env.line),
file: unquote(env.file)}
end
defp validate_qual(qual) when is_atom(qual) do
qual!(qual)
end
defp validate_qual(qual) do
quote(do: Ecto.Query.Builder.Join.qual!(unquote(qual)))
end
defp validate_bind(bind, all) do
if bind != :_ and bind in all do
Builder.error! "variable `#{bind}` is already defined in query"
end
end
@qualifiers [:inner, :left, :right, :full]
@doc """
Called at runtime to check dynamic qualifier.
"""
def qual!(qual) when qual in @qualifiers, do: qual
def qual!(qual) do
Builder.error! "invalid join qualifier `#{inspect qual}`, accepted qualifiers are: " <>
Enum.map_join(@qualifiers, ", ", &"`#{inspect &1}`")
end
end | lib/ecto/query/builder/join.ex | 0.869021 | 0.50891 | join.ex | starcoder |
defmodule Recase do
@moduledoc """
Recase allows you to convert strings from any case to any other case.
This module contains the public interface.
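## Example
A quick tour (each function below documents more variants):
    iex> Recase.to_snake("SomeValue")
    "some_value"
    iex> Recase.to_camel("some value")
    "someValue"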
"""
alias Recase.{
CamelCase,
ConstantCase,
DotCase,
KebabCase,
PascalCase,
PathCase,
SentenceCase,
SnakeCase,
TitleCase
}
@doc """
Converts string to PascalCase (aka UpperCase).
## Examples
iex> Recase.to_pascal("some-value")
"SomeValue"
iex> Recase.to_pascal("some value")
"SomeValue"
"""
@spec to_pascal(String.t) :: String.t
def to_pascal(value), do: PascalCase.convert(value)
@doc """
Converts string to camelCase.
## Examples
iex> Recase.to_camel("some-value")
"someValue"
iex> Recase.to_camel("Some Value")
"someValue"
"""
@spec to_camel(String.t) :: String.t
def to_camel(value), do: CamelCase.convert(value)
@doc """
Converts string to snake_case.
## Examples
iex> Recase.to_snake("some-value")
"some_value"
iex> Recase.to_snake("someValue")
"some_value"
"""
@spec to_snake(String.t) :: String.t
def to_snake(value), do: SnakeCase.convert(value)
@doc """
Converts string to kebab-case.
## Examples
iex> Recase.to_kebab("SomeValue")
"some-value"
iex> Recase.to_kebab("some value")
"some-value"
"""
@spec to_kebab(String.t) :: String.t
def to_kebab(value), do: KebabCase.convert(value)
@doc """
Converts string to CONSTANT_CASE.
## Examples
iex> Recase.to_constant("SomeValue")
"SOME_VALUE"
iex> Recase.to_constant("some value")
"SOME_VALUE"
"""
@spec to_constant(String.t) :: String.t
def to_constant(value), do: ConstantCase.convert(value)
@doc ~S"""
Converts string to path/case.
## Examples
iex> Recase.to_path("SomeValue")
"Some/Value"
iex> Recase.to_path("some value", "\\")
"some\\value"
"""
@spec to_path(String.t, String.t) :: String.t
def to_path(value, separator), do: PathCase.convert(value, separator)
@spec to_path(String.t) :: String.t
def to_path(value), do: PathCase.convert(value)
@doc """
Converts string to dot.case.
## Examples
iex> Recase.to_dot("SomeValue")
"some.value"
iex> Recase.to_dot("some value")
"some.value"
"""
@spec to_dot(String.t) :: String.t
def to_dot(value), do: DotCase.convert(value)
@doc """
Converts string to Sentence case.
## Examples
iex> Recase.to_sentence("SomeValue")
"Some value"
iex> Recase.to_sentence("some value")
"Some value"
"""
@spec to_sentence(String.t) :: String.t
def to_sentence(value), do: SentenceCase.convert(value)
@doc """
Converts string to Title Case.
## Examples
iex> Recase.to_title("SomeValue")
"Some Value"
iex> Recase.to_title("some value")
"Some Value"
"""
@spec to_title(String.t) :: String.t
def to_title(value), do: TitleCase.convert(value)
end | lib/recase.ex | 0.834744 | 0.540681 | recase.ex | starcoder |
defmodule Redix do
@moduledoc """
This module provides the main API to interface with Redis.
## Overview
`start_link/2` starts a process that connects to Redis. Each Elixir process
started with this function maps to a client TCP connection to the specified
Redis server.
The architecture is very simple: when you issue commands to Redis (via
`command/3` or `pipeline/3`), the Redix process sends the command to Redis right
away and is immediately able to send new commands. When a response arrives
from Redis, only then the Redix process replies to the caller with the
response. This pattern avoids blocking the Redix process for each request (until
a response arrives), increasing the performance of this driver.
## Reconnections
Redix tries to be as resilient as possible: it tries to recover automatically
from most network errors.
If there's a network error when sending data to Redis or if the connection to Redis
drops, Redix tries to reconnect. The first reconnection attempt will happen
after a fixed time interval; if this attempt fails, reconnections are
attempted until successful, and the time interval between reconnections is
increased exponentially. Some aspects of this behaviour can be configured; see
`start_link/2` and the "Reconnections" page in the docs for more information.
## Sentinel
**Note**: support for Redis Sentinel **is still experimental**. It works, but the API might
change a little bit and the design might be revisited.
Redix supports [Redis Sentinel](https://redis.io/topics/sentinel) by passing a `:sentinel`
option to `start_link/1` (or `start_link/2`) instead of `:host` and `:port`. In `:sentinel`,
you'll specify a list of sentinel nodes to try when connecting and the name of a primary group
(see `start_link/1` for more detailed information on these options). When connecting, Redix will
attempt to connect to each of the specified sentinels in the given order. When it manages to
connect to a sentinel, it will ask that sentinel for the address of the primary for the given
primary group. Then, it will connect to that primary and ask it for confirmation that it is
indeed a primary. If anything in this process doesn't go right, the next sentinel in the list
will be tried.
All of this happens in case of disconnections as well. If there's a disconnection, the whole
process of asking sentinels for a primary is executed again.
You should only care about Redis Sentinel when starting a `Redix` connection: once started,
using the connection will be exactly the same as the non-sentinel scenario.
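For example, a minimal sentinel configuration could look like this (host names
are illustrative):
    Redix.start_link(
      sentinel: [
        sentinels: ["redis://sent1.example.com:26379", "redis://sent2.example.com:26379"],
        group: "main"
      ],
      name: :redix
    )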
## Transactions or pipelining?
Pipelining and transactions have things in common but they're fundamentally different.
With a pipeline, you're sending all commands in the pipeline *at once* on the connection
to Redis. This means Redis receives all commands at once, but the Redis server is not
guaranteed to process all those commands at once.
On the other hand, a `MULTI`/`EXEC` transaction guarantees that when `EXEC` is called
all the queued commands in the transaction are executed atomically. However, you don't
need to send all the commands in the transaction at once. If you want to combine
pipelining with `MULTI`/`EXEC` transactions, use `transaction_pipeline/3`.
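For example, assuming `conn` is a started connection, both calls below send all
commands at once, but only the second one runs atomically:
    # Pipelined, not atomic:
    Redix.pipeline(conn, [["INCR", "a"], ["INCR", "b"]])
    # Pipelined and wrapped in MULTI/EXEC:
    Redix.transaction_pipeline(conn, [["INCR", "a"], ["INCR", "b"]])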
## Skipping replies
Redis provides commands to control whether you want replies to your commands or not.
These commands are `CLIENT REPLY ON`, `CLIENT REPLY SKIP`, and `CLIENT REPLY OFF`.
When you use `CLIENT REPLY SKIP`, only the command that follows will not get a reply.
When you use `CLIENT REPLY OFF`, all the commands that follow will not get replies until
`CLIENT REPLY ON` is issued. Redix does not support these commands directly because they
would change the whole state of the connection. To skip replies, use `noreply_pipeline/3`
or `noreply_command/3`.
Skipping replies is useful to improve performance when you want to issue many commands
but are not interested in the responses to those commands.
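For example, a fire-and-forget write (see `noreply_command/3` below):
    :ok = Redix.noreply_command(conn, ["SET", "mykey", "foo"])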
## SSL
Redix supports SSL by passing `ssl: true` in `start_link/1`. You can use the `:socket_opts`
option to pass options that will be used by the SSL socket, like certificates.
If the [CAStore](https://hex.pm/packages/castore) dependency is available, Redix will pick
up its CA certificate store file automatically. You can select a different CA certificate
store by passing in the `:cacertfile` or `:cacerts` socket options. If the server uses a
self-signed certificate, such as for testing purposes, disable certificate verification by
passing `verify: :verify_none` in the socket options.
Some Redis servers, notably Amazon ElastiCache, use wildcard certificates that require
additional socket options for successful verification (requires OTP 21.0 or later):
Redix.start_link(
host: "example.com", port: 9999, ssl: true,
socket_opts: [
customize_hostname_check: [
match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
]
]
)
## Telemetry
Redix uses Telemetry for instrumentation and logging. See `Redix.Telemetry`.
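For example, you could attach a handler to the events emitted around pipelined
commands (`MyApp.RedixTelemetry` is a hypothetical module):
    :telemetry.attach_many(
      "redix-pipeline-handler",
      [[:redix, :pipeline], [:redix, :pipeline, :error]],
      &MyApp.RedixTelemetry.handle_event/4,
      :no_config
    )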
"""
# This module is only a "wrapper" module that exposes the public API alongside
# documentation for it. The real work is done in Redix.Connection and every
# function in this module goes through Redix.Connection.pipeline/3 one way or
# another.
@type command() :: [String.Chars.t()]
@type connection() :: GenServer.server()
@default_timeout 5000
@doc """
Starts a connection to Redis.
This function returns `{:ok, pid}` if the Redix process is started
successfully.
{:ok, pid} = Redix.start_link()
The actual TCP connection to the Redis server may happen either synchronously,
before `start_link/2` returns, or asynchronously. This behaviour is decided by
the `:sync_connect` option (see below).
This function accepts one argument which can either be an string representing
a URI or a keyword list of options.
## Using in supervision trees
Redix supports child specs, so you can use it as part of a supervision tree:
children = [
{Redix, host: "redix.myapp.com", name: :redix}
]
See `child_spec/1` for more information.
## Using a Redis URI
In case `uri_or_opts` is a Redis URI, it must be in the form:
redis://[:password@]host[:port][/db]
Here are some examples of valid URIs:
* `redis://localhost`
* `redis://:secret@localhost:6397`
* `redis://example.com:6380/1`
Usernames before the password are ignored, so these two URIs are
equivalent:
redis://:secret@localhost
redis://myuser:secret@localhost
The only mandatory thing when using URIs is the host. All other elements are optional
and their default value can be found in the "Options" section below.
## Options
### Redis options
The following options can be used to specify the parameters used to connect to
Redis (instead of a URI as described above):
* `:host` - (string) the host where the Redis server is running. Defaults to
`"localhost"`.
* `:port` - (positive integer) the port on which the Redis server is
running. Defaults to `6379`.
* `:password` - (string) the password used to connect to Redis. Defaults to
`nil`, meaning no password is used. When this option is provided, all Redix
does is issue an `AUTH` command to Redis in order to authenticate.
* `:database` - (non-negative integer or string) the database to connect to.
Defaults to `nil`, meaning Redix doesn't connect to a specific database (the
default in this case is database `0`). When this option is provided, all Redix
does is issue a `SELECT` command to Redis in order to select the given database.
### Connection options
The following options can be used to tweak how the Redix connection behaves.
* `:socket_opts` - (list of options) this option specifies a list of options
that are passed to the network layer when connecting to the Redis
server. Some socket options (like `:active` or `:binary`) will be
overridden by Redix so that it functions properly.
Defaults to `[]` for TCP and `[verify: :verify_peer, depth: 2]` for SSL.
If the `CAStore` dependency is available, the `cacertfile` option is added
to the SSL options by default as well.
* `:timeout` - (integer) connection timeout (in milliseconds) also directly
passed to the network layer. Defaults to `5000`.
* `:sync_connect` - (boolean) decides whether Redix should initiate the TCP
connection to the Redis server *before* or *after* returning from
`start_link/1`. This option also changes some reconnection semantics; read
the "Reconnections" page in the docs.
* `:exit_on_disconnection` - (boolean) if `true`, the Redix server will exit
if it fails to connect or disconnects from Redis. Note that setting this
option to `true` means that the `:backoff_initial` and `:backoff_max` options
will be ignored. Defaults to `false`.
* `:backoff_initial` - (non-negative integer) the initial backoff time (in milliseconds),
which is the time that the Redix process will wait before
attempting to reconnect to Redis after a disconnection or failed first
connection. See the "Reconnections" page in the docs for more information.
* `:backoff_max` - (positive integer) the maximum length (in milliseconds) of the
time interval used between reconnection attempts. See the "Reconnections"
page in the docs for more information.
* `:log` - (keyword list) a keyword list of `{action, level}` where `level` is
the log level to use to log `action`. **This option is deprecated** in favor
of Telemetry. See the "Telemetry" section in the module documentation.
The possible actions and their default values are:
* `:disconnection` (defaults to `:error`) - logged when the connection to
Redis is lost
* `:failed_connection` (defaults to `:error`) - logged when Redix can't
establish a connection to Redis
* `:reconnection` (defaults to `:info`) - logged when Redix manages to
reconnect to Redis after the connection was lost
* `:name` - Redix is bound to the same registration rules as a `GenServer`. See the
`GenServer` documentation for more information.
* `:ssl` - (boolean) if `true`, connect through SSL, otherwise through TCP. The
`:socket_opts` option applies to both SSL and TCP, so it can be used for things
like certificates. See `:ssl.connect/4`. Defaults to `false`.
* `:sentinel` - (keyword list) options for using
[Redis Sentinel](https://redis.io/topics/sentinel). If this option is provided, then the
`:host` and `:port` option cannot be provided. For the available sentinel options, see the
"Sentinel options" section below.
### Sentinel options
The following options can be used to configure the Redis Sentinel behaviour when connecting.
These options should be passed in the `:sentinel` key in the connection options. For more
information on support for Redis sentinel, see the `Redix` module documentation.
* `:sentinels` - (list) a list of sentinel addresses. Each element in this list is the address
of a sentinel to be contacted in order to obtain the address of a primary. The address of
a sentinel can be passed as a Redis URI (see the "Using a Redis URI" section above) or
a keyword list with `:host`, `:port`, `:password` options (same as when connecting to a
Redis instance directly). Note that the password can either be passed in the sentinel
address or globally -- see the `:password` option below. This option is required.
* `:group` - (binary) the name of the group that identifies the primary in the sentinel
configuration. This option is required.
* `:role` - (`:primary` or `:replica`) if `:primary`, the connection will be established
with the primary for the given group. If `:replica`, Redix will ask the sentinel for all
the available replicas for the given group and try to connect to one of them **at random**.
Defaults to `:primary`.
* `:socket_opts` - (list of options) the socket options that will be used when connecting to
the sentinels. Defaults to `[]`.
* `:ssl` - (boolean) if `true`, connect to the sentinels via through SSL, otherwise through
TCP. The `:socket_opts` applies to both TCP and SSL, so it can be used for things like
certificates. See `:ssl.connect/4`. Defaults to `false`.
* `:timeout` - (timeout) the timeout (in milliseconds or `:infinity`) that will be used to
interact with the sentinels. This timeout will be used as the timeout when connecting to
each sentinel and when asking sentinels for a primary. The Redis documentation suggests
to keep this timeout short so that connection to Redis can happen quickly.
* `:password` - (string) if you don't want to specify a password for each sentinel you
list, you can use this option to specify a password that will be used to authenticate
on sentinels if they don't specify a password. This option is recommended over passing
a password for each sentinel because in the future we might do sentinel auto-discovery,
which means authentication can only be done through a global password that works for all
sentinels.
## Examples
iex> Redix.start_link()
{:ok, #PID<...>}
iex> Redix.start_link(host: "example.com", port: 9999, password: "<PASSWORD>")
{:ok, #PID<...>}
iex> Redix.start_link(database: 3, name: :redix_3)
{:ok, #PID<...>}
"""
@spec start_link(binary() | keyword()) :: :gen_statem.start_ret()
def start_link(uri_or_opts \\ [])
def start_link(uri) when is_binary(uri), do: start_link(uri, [])
def start_link(opts) when is_list(opts), do: Redix.Connection.start_link(opts)
@doc """
Starts a connection to Redis.
This is the same as `start_link/1`, but the URI and the options get merged. `other_opts` have
precedence over the things specified in `uri`. Take this code:
start_link("redis://localhost:6379", port: 6380)
In this example, port `6380` will be used.
"""
@spec start_link(binary(), keyword()) :: :gen_statem.start_ret()
def start_link(uri, other_opts)
def start_link(uri, other_opts) when is_binary(uri) and is_list(other_opts) do
opts = Redix.URI.opts_from_uri(uri)
start_link(Keyword.merge(opts, other_opts))
end
@doc """
Returns a child spec to use Redix in supervision trees.
To use Redix with the default options (same as calling `start_link()`):
children = [
Redix,
# ...
]
You can pass options:
children = [
{Redix, host: "redix.example.com", name: :redix},
# ...
]
You can also pass a URI:
children = [
{Redix, "redis://redix.example.com:6380"}
]
If you want to pass both a URI and options, you can do it by passing a tuple with the URI as the
first element and the list of options (wrapped in square brackets if given as a literal) as
the second element:
children = [
{Redix, {"redis://redix.example.com", [name: :redix]}}
]
"""
@spec child_spec(uri | keyword() | {uri, keyword()}) :: Supervisor.child_spec()
when uri: binary()
def child_spec(uri_or_opts)
def child_spec({uri, opts}) when is_binary(uri) and is_list(opts) do
child_spec_with_args([uri, opts])
end
def child_spec(uri_or_opts) when is_binary(uri_or_opts) or is_list(uri_or_opts) do
child_spec_with_args([uri_or_opts])
end
defp child_spec_with_args(args) do
%{
id: __MODULE__,
type: :worker,
start: {__MODULE__, :start_link, args}
}
end
@doc """
Closes the connection to the Redis server.
This function is synchronous and blocks until the given Redix connection frees
all its resources and disconnects from the Redis server. `timeout` can be
passed to limit the amount of time allowed for the connection to exit; if it
doesn't exit in the given interval, this call exits.
## Examples
iex> Redix.stop(conn)
:ok
"""
@spec stop(connection(), timeout()) :: :ok
def stop(conn, timeout \\ :infinity) do
Redix.Connection.stop(conn, timeout)
end
@doc """
Issues a pipeline of commands on the Redis server.
`commands` must be a list of commands, where each command is a list of strings
making up the command and its arguments. The commands will be sent as a single
"block" to Redis, and a list of ordered responses (one for each command) will
be returned.
The return value is `{:ok, results}` if the request is successful, `{:error,
reason}` otherwise.
Note that `{:ok, results}` is returned even if `results` contains one or more
Redis errors (`Redix.Error` structs). This is done to avoid having to walk the
list of results (a `O(n)` operation) to look for errors, leaving the
responsibility to the user. That said, errors other than Redis errors (like
network errors) always cause the return value to be `{:error, reason}`.
If `commands` is an empty list (`[]`) or any of the commands in `commands` is
an empty command (`[]`) then an `ArgumentError` exception is raised right
away.
Pipelining is not the same as a transaction. For more information, see the
module documentation.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> Redix.pipeline(conn, [["INCR", "mykey"], ["INCR", "mykey"], ["DECR", "mykey"]])
{:ok, [1, 2, 1]}
iex> Redix.pipeline(conn, [["SET", "k", "foo"], ["INCR", "k"], ["GET", "k"]])
{:ok, ["OK", %Redix.Error{message: "ERR value is not an integer or out of range"}, "foo"]}
If Redis goes down (before a reconnection happens):
iex> {:error, error} = Redix.pipeline(conn, [["SET", "mykey", "foo"], ["GET", "mykey"]])
iex> error.reason
:closed
"""
@spec pipeline(connection(), [command()], keyword()) ::
{:ok, [Redix.Protocol.redis_value()]}
| {:error, atom() | Redix.Error.t() | Redix.ConnectionError.t()}
def pipeline(conn, commands, opts \\ []) do
assert_valid_pipeline_commands(commands)
pipeline_without_checks(conn, commands, opts)
end
@doc """
Issues a pipeline of commands to the Redis server, raising if there's an error.
This function works similarly to `pipeline/3`, except:
* if there are no errors in issuing the commands (even if there are one or
more Redis errors in the results), the results are returned directly (not
wrapped in a `{:ok, results}` tuple).
* if there's a connection error then a `Redix.ConnectionError` exception is raised.
For more information on why nothing is raised if there are one or more Redis
errors (`Redix.Error` structs) in the list of results, look at the
documentation for `pipeline/3`.
This function accepts the same options as `pipeline/3`.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> Redix.pipeline!(conn, [["INCR", "mykey"], ["INCR", "mykey"], ["DECR", "mykey"]])
[1, 2, 1]
iex> Redix.pipeline!(conn, [["SET", "k", "foo"], ["INCR", "k"], ["GET", "k"]])
["OK", %Redix.Error{message: "ERR value is not an integer or out of range"}, "foo"]
If Redis goes down (before a reconnection happens):
iex> Redix.pipeline!(conn, [["SET", "mykey", "foo"], ["GET", "mykey"]])
** (Redix.ConnectionError) :closed
"""
@spec pipeline!(connection(), [command()], keyword()) ::
[Redix.Protocol.redis_value()] | no_return()
def pipeline!(conn, commands, opts \\ []) do
case pipeline(conn, commands, opts) do
{:ok, response} -> response
{:error, error} -> raise error
end
end
@doc """
Issues a pipeline of commands to the Redis server, asking the server to not send responses.
This function is useful when you want to issue commands to the Redis server but you don't
care about the responses. For example, you might want to set a bunch of keys but you don't
care for a confirmation that they were set. In these cases, you can save bandwith by asking
Redis to not send replies to your commands.
Since no replies are sent back, this function returns `:ok` in case there are no network
errors, or `{:error, reason}` otherwise.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> commands = [["INCR", "mykey"], ["INCR", "mykey"]]
iex> Redix.noreply_pipeline(conn, commands)
:ok
iex> Redix.command(conn, ["GET", "mykey"])
{:ok, "2"}
"""
# TODO: use @doc since directly when we depend on 1.7+.
if Version.match?(System.version(), "~> 1.7"), do: @doc(since: "0.8.0")
@spec noreply_pipeline(connection(), [command()], keyword()) ::
:ok | {:error, atom() | Redix.Error.t() | Redix.ConnectionError.t()}
def noreply_pipeline(conn, commands, opts \\ []) do
assert_valid_pipeline_commands(commands)
commands = [["CLIENT", "REPLY", "OFF"]] ++ commands ++ [["CLIENT", "REPLY", "ON"]]
# The "OK" response comes from the last "CLIENT REPLY ON".
with {:ok, ["OK"]} <- pipeline_without_checks(conn, commands, opts),
do: :ok
end
@doc """
Same as `noreply_pipeline/3` but raises in case of errors.
"""
# TODO: use @doc since directly when we depend on 1.7+.
if Version.match?(System.version(), "~> 1.7"), do: @doc(since: "0.8.0")
@spec noreply_pipeline!(connection(), [command()], keyword()) :: :ok
def noreply_pipeline!(conn, commands, opts \\ []) do
case noreply_pipeline(conn, commands, opts) do
:ok -> :ok
{:error, error} -> raise error
end
end
@doc """
Issues a command on the Redis server.
This function sends `command` to the Redis server and returns the response
returned by Redis. `pid` must be the pid of a Redix connection. `command` must
be a list of strings making up the Redis command and its arguments.
The return value is `{:ok, response}` if the request is successful and the
response is not a Redis error. `{:error, reason}` is returned in case there's
an error in the request (such as losing the connection to Redis in between the
request). `reason` can also be a `Redix.Error` exception in case Redis is
reachable but returns an error (such as a type error).
If the given command is an empty command (`[]`), an `ArgumentError`
exception is raised.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> Redix.command(conn, ["SET", "mykey", "foo"])
{:ok, "OK"}
iex> Redix.command(conn, ["GET", "mykey"])
{:ok, "foo"}
iex> Redix.command(conn, ["INCR", "mykey"])
{:error, "ERR value is not an integer or out of range"}
If Redis goes down (before a reconnection happens):
iex> {:error, error} = Redix.command(conn, ["GET", "mykey"])
iex> error.reason
:closed
"""
@spec command(connection(), command(), keyword()) ::
{:ok, Redix.Protocol.redis_value()}
| {:error, atom() | Redix.Error.t() | Redix.ConnectionError.t()}
def command(conn, command, opts \\ []) do
case pipeline(conn, [command], opts) do
{:ok, [%Redix.Error{} = error]} -> {:error, error}
{:ok, [response]} -> {:ok, response}
{:error, _reason} = error -> error
end
end
@doc """
Issues a command on the Redis server, raising if there's an error.
This function works exactly like `command/3` but:
* if the command is successful, then the result is returned directly (not wrapped in a
`{:ok, result}` tuple).
* if there's a Redis error or a connection error, a `Redix.Error` or `Redix.ConnectionError`
error is raised.
This function accepts the same options as `command/3`.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> Redix.command!(conn, ["SET", "mykey", "foo"])
"OK"
iex> Redix.command!(conn, ["INCR", "mykey"])
** (Redix.Error) ERR value is not an integer or out of range
If Redis goes down (before a reconnection happens):
iex> Redix.command!(conn, ["GET", "mykey"])
** (Redix.ConnectionError) :closed
"""
@spec command!(connection(), command(), keyword()) :: Redix.Protocol.redis_value() | no_return()
def command!(conn, command, opts \\ []) do
case command(conn, command, opts) do
{:ok, response} -> response
{:error, error} -> raise error
end
end
@doc """
Same as `command/3` but tells the Redis server to not return a response.
This function is useful when you want to send a command but you don't care about the response.
Since the response is not returned, the return value of this function in case the command
is successfully sent to Redis is `:ok`.
Not receiving a response means saving traffic on the network and memory allocation for the
response. See also `noreply_pipeline/3`.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> Redix.noreply_command(conn, ["INCR", "mykey"])
:ok
iex> Redix.command(conn, ["GET", "mykey"])
{:ok, "1"}
"""
# TODO: use @doc since directly when we depend on 1.7+.
if Version.match?(System.version(), "~> 1.7"), do: @doc(since: "0.8.0")
@spec noreply_command(connection(), command(), keyword()) ::
:ok | {:error, atom() | Redix.Error.t() | Redix.ConnectionError.t()}
def noreply_command(conn, command, opts \\ []) do
noreply_pipeline(conn, [command], opts)
end
@doc """
Same as `noreply_command/3` but raises in case of errors.
"""
if Version.match?(System.version(), "~> 1.7"), do: @doc(since: "0.8.0")
@spec noreply_command!(connection(), command(), keyword()) :: :ok
def noreply_command!(conn, command, opts \\ []) do
case noreply_command(conn, command, opts) do
:ok -> :ok
{:error, error} -> raise error
end
end
@doc """
Executes a `MULTI`/`EXEC` transaction.
Redis supports something akin to transactions. It works by sending a `MULTI` command,
then some commands, and then an `EXEC` command. All the commands after `MULTI` are
queued until `EXEC` is issued. When `EXEC` is issued, all the responses to the queued
commands are returned in a list.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
To run a `MULTI`/`EXEC` transaction in one go, use this function and pass a list of
commands to use in the transaction:
iex> Redix.transaction_pipeline(conn, [["SET", "mykey", "foo"], ["GET", "mykey"]])
{:ok, ["OK", "foo"]}
## Problems with transactions
There's an inherent problem with Redix's architecture and `MULTI`/`EXEC` transaction.
A Redix process is a single connection to Redis that can be used by many clients. If
a client A sends `MULTI` and client B sends a command before client A sends `EXEC`,
client B's command will be part of the transaction. This is intended behaviour, but
it might not be what you expect. This is why `transaction_pipeline/3` exists: this function
wraps `commands` in `MULTI`/`EXEC` but *sends all in a pipeline*. Since everything
is sent in the pipeline, it's sent at once on the connection and no commands can
end up in the middle of the transaction.
## Running `MULTI`/`EXEC` transactions manually
There are still some cases where you might want to start a transaction with `MULTI`,
then send commands from different processes that you actively want to be in the
transaction, and then send an `EXEC` to run the transaction. It's still fine to do
this with `command/3` or `pipeline/3`, but remember what was explained in the section
above. If you do this, do it in an isolated connection (open a new one if necessary)
to avoid mixing things up.
"""
# TODO: use @doc since directly when we depend on 1.7+.
if Version.match?(System.version(), "~> 1.7"), do: @doc(since: "0.8.0")
@spec transaction_pipeline(connection(), [command()], keyword()) ::
{:ok, [Redix.Protocol.redis_value()]}
| {:error, atom() | Redix.Error.t() | Redix.ConnectionError.t()}
def transaction_pipeline(conn, [_ | _] = commands, options \\ []) when is_list(commands) do
with {:ok, responses} <- Redix.pipeline(conn, [["MULTI"]] ++ commands ++ [["EXEC"]], options),
do: {:ok, List.last(responses)}
end
@doc """
Executes a `MULTI`/`EXEC` transaction.
Same as `transaction_pipeline/3`, but returns the result directly instead of wrapping it
in an `{:ok, result}` tuple or raises if there's an error.
## Options
* `:timeout` - (integer or `:infinity`) request timeout (in
milliseconds). Defaults to `#{@default_timeout}`. If the Redis server
doesn't reply within this timeout, `{:error,
%Redix.ConnectionError{reason: :timeout}}` is returned.
## Examples
iex> Redix.transaction_pipeline!(conn, [["SET", "mykey", "foo"], ["GET", "mykey"]])
["OK", "foo"]
"""
# TODO: use @doc since directly when we depend on 1.7+.
if Version.match?(System.version(), "~> 1.7"), do: @doc(since: "0.8.0")
@spec transaction_pipeline!(connection(), [command()], keyword()) :: [
Redix.Protocol.redis_value()
]
def transaction_pipeline!(conn, commands, options \\ []) do
case transaction_pipeline(conn, commands, options) do
{:ok, response} -> response
{:error, error} -> raise(error)
end
end
defp pipeline_without_checks(conn, commands, opts) do
timeout = opts[:timeout] || @default_timeout
telemetry_metadata = %{
connection: conn,
commands: commands,
start_time: System.system_time()
}
start_time = System.monotonic_time()
case Redix.Connection.pipeline(conn, commands, timeout) do
{:ok, response} ->
end_time = System.monotonic_time()
measurements = %{elapsed_time: end_time - start_time}
:ok = :telemetry.execute([:redix, :pipeline], measurements, telemetry_metadata)
{:ok, response}
{:error, reason} ->
telemetry_metadata = Map.put(telemetry_metadata, :reason, reason)
:ok = :telemetry.execute([:redix, :pipeline, :error], %{}, telemetry_metadata)
{:error, reason}
end
end
defp assert_valid_pipeline_commands([] = _commands) do
raise ArgumentError, "no commands passed to the pipeline"
end
defp assert_valid_pipeline_commands(commands) when is_list(commands) do
Enum.each(commands, &assert_valid_command/1)
end
defp assert_valid_pipeline_commands(other) do
raise ArgumentError, "expected a list of Redis commands, got: #{inspect(other)}"
end
defp assert_valid_command([]) do
raise ArgumentError, "got an empty command ([]), which is not a valid Redis command"
end
defp assert_valid_command([first, second | _] = command) do
case String.upcase(first) do
first when first in ["SUBSCRIBE", "PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE"] ->
raise ArgumentError,
"Redix doesn't support Pub/Sub commands; use redix_pubsub " <>
"(https://github.com/whatyouhide/redix_pubsub) for Pub/Sub " <>
"functionality support. Offending command: #{inspect(command)}"
"CLIENT" ->
if String.upcase(second) == "REPLY" do
raise ArgumentError,
"CLIENT REPLY commands are forbidden because of how Redix works internally. " <>
"If you want to issue commands without getting a reply, use noreply_pipeline/2 or noreply_command/2"
end
_other ->
:ok
end
end
defp assert_valid_command(other) when not is_list(other) do
raise ArgumentError,
"expected a list of binaries as each Redis command, got: #{inspect(other)}"
end
defp assert_valid_command(_command) do
:ok
end
end | lib/redix.ex | 0.931252 | 0.748789 | redix.ex | starcoder |
defmodule Ecto.Model.Queryable do
@moduledoc """
Defines a model as queryable.
In order to create queries in Ecto, you need to pass a queryable
data structure as argument. By using `Ecto.Model.Queryable` in
your model, it imports the `queryable/2` macro.
Assuming you have an entity named `Weather.Entity`, you can associate
it with a model via:
defmodule Weather do
use Ecto.Model
queryable "weather", Weather.Entity
end
Since this is a common pattern, Ecto allows developers to define an entity
inlined in a model:
defmodule Weather do
use Ecto.Model
queryable "weather" do
field :city, :string
field :temp_lo, :integer
field :temp_hi, :integer
field :prcp, :float, default: 0.0
end
end
By making it queryable, three functions are added to the model:
* `new/0` - simply delegates to `entity.new/0`
* `new/1` - simply delegates to `entity.new/1`
* `__model__/1` - reflection functions about the source and entity
This module also automatically imports `from/1` and `from/2`
from `Ecto.Query` as a convenience.
## Entity defaults
When using the block syntax, the created entity uses the usual default
of a primary key named `:id`, of type `:integer`. This can be customized
by passing `primary_key: false` to queryable:
queryable "weather", primary_key: false do
...
end
Or by passing a tuple in the format `{ field, type, opts }`:
queryable "weather", primary_key: { :custom_field, :string, [] } do
...
end
Global defaults can be specified via the `@queryable_defaults` attribute.
This is useful if you want to use a different default primary key
through your entire application.
The supported options are:
* `primary_key` - either `false`, or a `{ field, type, opts }` tuple
* `foreign_key_type` - sets the type for any belongs_to associations.
This can be overridden using the `:type` option
to the `belongs_to` statement. Defaults to
type `:integer`
## Reflection
Any queryable model module will generate the `__model__` function that can be
used for runtime introspection of the model.
* `__model__(:source)` - Returns the "source" as given to `queryable/2`;
* `__model__(:entity)` - Returns the entity module as given to or generated by
`queryable/2`;
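For example, given the `Weather` model above:
    Weather.__model__(:source) #=> "weather"
    Weather.__model__(:entity) #=> Weather.Entity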
## Example
defmodule MyApp.Model do
defmacro __using__(_) do
quote do
@queryable_defaults primary_key: { :uuid, :string, [] },
foreign_key_type: :string
use Ecto.Model
end
end
end
defmodule MyApp.Post do
use MyApp.Model
queryable "posts" do
has_many :comments, MyApp.Comment
end
end
defmodule MyApp.Comment do
use MyApp.Model
queryable "comments" do
belongs_to :post, MyApp.Comment
end
end
By using `MyApp.Model`, any `MyApp.Post` and `MyApp.Comment` entities
will get the `:uuid` field, with type `:string` as the primary key.
The `belongs_to` association on `MyApp.Comment` will also now require
that `:post_id` be of `:string` type to reference the `:uuid` of a
`MyApp.Post` entity.
"""
@doc false
defmacro __using__(_) do
quote do
import Ecto.Query, only: [from: 2]
import unquote(__MODULE__)
end
end
@doc """
Defines a queryable name and its entity.
The source and entity can be accessed during the model compilation
via `@ecto_source` and `@ecto_entity`.
## Example
defmodule Post do
use Ecto.Model
queryable "posts", Post.Entity
end
"""
defmacro queryable(source, entity)
@doc """
Defines a queryable name and the entity definition inline. `opts` will be
given to the `use Ecto.Entity` call, see `Ecto.Entity`.
## Examples
# The two following Model definitions are equivalent
defmodule Post do
use Ecto.Model
queryable "posts" do
field :text, :string
end
end
defmodule Post do
use Ecto.Model
defmodule Entity do
use Ecto.Entity, model: Post
field :text, :string
end
queryable "posts", Entity
end
"""
defmacro queryable(source, opts \\ [], do: block)
defmacro queryable(source, opts, [do: block]) do
quote do
opts =
(Module.get_attribute(__MODULE__, :queryable_defaults) || [])
|> Keyword.merge(unquote(opts))
|> Keyword.put(:model, __MODULE__)
defmodule Entity do
use Ecto.Entity, opts
unquote(block)
end
queryable(unquote(source), Entity)
end
end
defmacro queryable(source, [], entity) do
quote do
@ecto_source unquote(source)
@ecto_entity unquote(entity)
@doc "Delegates to #{@ecto_entity}.new/0"
def new(), do: @ecto_entity.new()
@doc "Delegates to #{@ecto_entity}.new/1"
def new(params), do: @ecto_entity.new(params)
@doc false
def __model__(:source), do: @ecto_source
def __model__(:entity), do: @ecto_entity
@doc false
def __queryable__,
do: Ecto.Query.Query[from: { @ecto_source, @ecto_entity, __MODULE__ }]
end
end
end | lib/ecto/model/queryable.ex | 0.852798 | 0.611411 | queryable.ex | starcoder |
defmodule RRange.Ruby do
@moduledoc """
Summarizes all of Ruby's Range functions.
Functions corresponding to the following patterns are not implemented:
- When a function with the same name already exists in Elixir.
- When a method name includes `!`.
- %, ==, ===
"""
@spec __using__(any) :: list
defmacro __using__(_opts) do
RUtils.define_all_functions!(__MODULE__)
end
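  # Example: a hypothetical namespace module pulling these functions in
  # (`RUtils.define_all_functions!/1` is assumed to define delegates for
  # every public function in this module):
  #
  #     defmodule RRange do
  #       use RRange.Ruby
  #     end
  #
  #     RRange.begin(1..3) #=> 1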
use RRange.RubyEnd
# https://ruby-doc.org/core-3.1.0/Range.html
# [:begin, :bsearch, :count, :cover?, :each, :end, :entries, :eql?, :exclude_end?, :first, :hash, :include?, :inspect, :last, :max, :member?, :min, :minmax, :size, :step, :to_a, :to_s]
# |> RUtils.required_functions([Range, REnum])
# ✔ begin
# × bsearch
# ✔ cover?
# ✔ end
# ✔ eql?
# × exclude_end?
# × hash
# ✔ inspect
# ✔ last
# ✔ step
# ✔ to_s
@doc """
Returns true if range1 == range2.
## Examples
iex> 1..3
iex> |> RRange.eql?(1..3)
true
iex> 1..3
iex> |> RRange.eql?(1..4)
false
"""
@spec eql?(Range.t(), Range.t()) :: boolean()
def eql?(range1, range2) do
range1 == range2
end
@doc """
Returns the first element of range.
## Examples
iex> RRange.begin(1..3)
1
"""
@spec begin(Range.t()) :: integer()
def begin(begin.._) do
begin
end
if(VersionManager.support_version?()) do
@doc """
Returns a lazy Stream over the given range, advancing by the given step.
## Examples
iex> RRange.step(1..10, 2)
iex> |> Enum.to_list()
[1, 3, 5, 7, 9]
"""
@spec step(Range.t(), integer()) :: Enum.t()
def step(begin..last, step) do
begin..last//step
|> REnum.Ruby.lazy()
end
@doc """
Executes `Enum.each/2` over the given range, advancing by the given step.
## Examples
iex> RRange.step(1..10, 2, &IO.inspect(&1))
# 1
# 3
# 5
# 7
# 9
:ok
"""
@spec step(Range.t(), integer(), function()) :: :ok
def step(begin..last, step, func) do
begin..last//step
|> Enum.each(func)
end
end
defdelegate inspect(range), to: Kernel, as: :inspect
defdelegate to_s(range), to: Kernel, as: :inspect
defdelegate cover?(range, n), to: Enum, as: :member?
end | lib/r_range/ruby.ex | 0.861305 | 0.544499 | ruby.ex | starcoder |
defmodule SurfaceBootstrap.NavBar do
@moduledoc """
The NavBar component.
Due to the massive amount of permutations possible in NavBar,
this component focuses on the two main outer wrapping features
of setting up a NavBar.
1. The NavBar itself, with coloring etc
2. The inner collapsible component NavBar.Collapse
Using the outer without the inner is ok, you cannot use the inner
without the outer.
The component `DropDown` has a property that enables it to be used
in a NavBar. This is the `wrapper` property and can be set to "nav_item".
Please refer to the examples for usage examples.
https://getbootstrap.com/docs/5.0/components/navbar/
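A minimal sketch (prop values are illustrative; `...` stands for your nav
content):
    <NavBar id="main-nav" color_type="dark" bg_color="dark">
      ...
    </NavBar>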
"""
use Surface.Component
alias SurfaceBootstrap.NavBar.Collapse
@colors ~w(primary secondary success danger warning info light dark)
@sizes ~w(sm md lg xl xxl fluid)
@doc """
Id, must be set if you want to manually put a collapsible button somewhere
else than inside the current `NavBar`. For example when using a sidebar.
Please look at the examples to see how to use to set up a sidebar.
"""
prop id, :string
@doc """
Color type of navbar, this essentially means if the menu items etc
are colored to match a dark or light background. If you plan to use
a dark bg_color you should be doing color_type=dark as well so that
menu items etc show lightened up. This defaults to light.
"""
prop color_type, :string, default: "light", values: ~w(light dark)
@doc "Background color of navbar"
prop bg_color, :string, default: "light", values: @colors
@doc "Nav size"
prop nav_size, :string, values: @sizes -- ["fluid"], default: "lg"
@doc "Container size, defaults to fluid"
prop container_size, :string, values: @sizes, default: "fluid"
@doc "Class to propagate to inner container"
prop container_class, :css_class, default: []
@doc "Placement? Not set defaults See: https://getbootstrap.com/docs/5.0/components/navbar/?#placement"
prop placement, :string, values: ~w(fixed_top fixed_bottom sticky_top)
@doc "Shadow"
prop shadow, :boolean
@doc "Any custom style you want to add to navbar"
prop style, :string
@doc "Is this sidebar Nav? Use `NavBar.SidebarItemGroup` and `NavBar.SidebarItem` to populate if so. "
prop sidebar, :boolean
@doc """
Sidebar collapse. This prop MUST be used with a custom `NavBar.Collapse` placed outside the navbar, as described for the `id` prop above.
"""
prop sidebar_collapse, :boolean
@doc "Any classes to put on the nav"
prop class, :css_class, default: []
@doc "Wrap content in collapsible and add collapsing button"
prop collapsible, :boolean, default: true
@doc """
Optional collapsible ID if you need to override. You need to set this
if you intend to use multiple navbars with collapse on the same page, as the
collapser code uses HTML ID to identify what to collapse / uncollapse.
"""
prop collapsible_id, :string, default: "navbar-bootstrap-collapsible-default-id"
@doc """
Should navbar be scrollable in collapsed state?
See: https://getbootstrap.com/docs/5.0/components/navbar/?#scrolling
"""
prop collapsible_scrollable, :boolean
@doc "Aria label for collapsible button"
prop collapsible_aria_label, :string
slot brand
slot default
def render(assigns) do
~F"""
<nav
id={@id}
class={[
"navbar",
"navbar-expand-#{@nav_size}",
"navbar-#{@color_type}": @color_type,
"bg-#{@bg_color}": @bg_color,
"fixed-top": @placement == "fixed_top",
"fixed-bottom": @placement == "fixed_bottom",
"sticky-top": @placement == "sticky_top",
shadow: @shadow,
sidebar: @sidebar,
collapse: @sidebar_collapse
] ++ @class}
:attrs={style: @style}
>
<div class={["container-#{@container_size}": @container_size] ++ @container_class}>
<Collapse
:if={@collapsible}
id={@collapsible_id}
scrollable={@collapsible_scrollable}
aria_label={@collapsible_aria_label}
>
<#slot :if={@collapsible} />
</Collapse>
<#slot :if={!@collapsible} />
</div>
</nav>
"""
end
end | lib/surface_bootstrap/navbar.ex | 0.724091 | 0.542863 | navbar.ex | starcoder |
defmodule Assent.Strategy.OAuth2 do
@moduledoc """
OAuth 2.0 strategy.
This strategy only supports the Authorization Code flow per
[RFC 6749](https://tools.ietf.org/html/rfc6749#section-1.3.1).
`authorize_url/1` returns a map with a `:url` and `:session_params` key. The
`:session_params` should be stored and passed back into `callback/3` as part
of config when the user returns. The `:session_params` carries a `:state`
value for the request [to prevent
CSRF](https://tools.ietf.org/html/rfc6749#section-4.1.1).
This library also supports JWT tokens for client authentication as per
[RFC 7523](https://tools.ietf.org/html/rfc7523).
## Configuration
- `:client_id` - The OAuth2 client id, required
- `:site` - The domain of the OAuth2 server, required
- `:auth_method` - The authentication strategy used, optional. If not set,
no authentication will be used during the access token request. The value
may be one of the following:
- `:client_secret_basic` - Authenticate with basic authorization header
- `:client_secret_post` - Authenticate with post params
- `:client_secret_jwt` - Authenticate with JWT using `:client_secret` as
secret
- `:private_key_jwt` - Authenticate with JWT using `:private_key_path` or
`:private_key` as secret
- `:client_secret` - The OAuth2 client secret, required if `:auth_method`
is `:client_secret_basic`, `:client_secret_post`, or `:client_secret_jwt`
- `:private_key_id` - The private key ID, required if `:auth_method` is
`:private_key_jwt`
- `:private_key_path` - The path for the private key, required if
`:auth_method` is `:private_key_jwt` and `:private_key` hasn't been set
- `:private_key` - The private key content that can be defined instead of
`:private_key_path`, required if `:auth_method` is `:private_key_jwt` and
`:private_key_path` hasn't been set
- `:jwt_algorithm` - The algorithm to use for JWT signing, optional,
defaults to `HS256` for `:client_secret_jwt` and `RS256` for
`:private_key_jwt`
## Usage
config = [
client_id: "REPLACE_WITH_CLIENT_ID",
client_secret: "REPLACE_WITH_CLIENT_SECRET",
auth_method: :client_secret_post,
site: "https://auth.example.com",
authorization_params: [scope: "user:read user:write"],
user_url: "https://example.com/api/user"
]
{:ok, {url: url, session_params: session_params}} =
config
|> Assent.Config.put(:redirect_uri, "http://localhost:4000/auth/callback")
|> Assent.Strategy.OAuth2.authorize_url()
{:ok, %{user: user, token: token}} =
config
|> Assent.Config.put(:session_params, session_params)
|> Assent.Strategy.OAuth2.callback(params)
"""
@behaviour Assent.Strategy
alias Assent.Strategy, as: Helpers
alias Assent.{CallbackCSRFError, CallbackError, Config, HTTPAdapter.HTTPResponse, JWTAdapter, MissingParamError, RequestError}
@doc """
Generate authorization URL for request phase.
## Configuration
- `:redirect_uri` - The URI that the server redirects the user to after
authentication, required
- `:authorize_url` - The path or URL for the OAuth2 server to redirect
users to, defaults to `/oauth/authorize`
- `:authorization_params` - The authorization parameters, defaults to `[]`
"""
@impl true
@spec authorize_url(Config.t()) :: {:ok, %{session_params: %{state: binary()}, url: binary()}} | {:error, term()}
def authorize_url(config) do
with {:ok, redirect_uri} <- Config.fetch(config, :redirect_uri),
{:ok, site} <- Config.fetch(config, :site),
{:ok, client_id} <- Config.fetch(config, :client_id) do
params = authorization_params(config, client_id, redirect_uri)
authorize_url = Config.get(config, :authorize_url, "/oauth/authorize")
url = Helpers.to_url(site, authorize_url, params)
{:ok, %{url: url, session_params: %{state: params[:state]}}}
end
end
defp authorization_params(config, client_id, redirect_uri) do
params = Config.get(config, :authorization_params, [])
[
response_type: "code",
client_id: client_id,
state: gen_state(),
redirect_uri: redirect_uri]
|> Keyword.merge(params)
|> List.keysort(0)
end
defp gen_state do
24
|> :crypto.strong_rand_bytes()
|> :erlang.bitstring_to_list()
|> Enum.map(fn x -> :erlang.integer_to_binary(x, 16) end)
|> Enum.join()
|> String.downcase()
end
@doc """
Callback phase for generating the access token with the authorization code and
fetching user data. Returns a map with the access token in `:token` and user data in
`:user`.
## Configuration
- `:token_url` - The path or URL to fetch the token from, optional,
defaults to `/oauth/token`
- `:user_url` - The path or URL to fetch user data, required
- `:session_params` - The session parameters that was returned from
`authorize_url/1`, optional
"""
@impl true
@spec callback(Config.t(), map(), atom()) :: {:ok, %{user: map(), token: map()}} | {:error, term()}
def callback(config, params, strategy \\ __MODULE__) do
with {:ok, session_params} <- Config.fetch(config, :session_params),
:ok <- check_error_params(params),
{:ok, code} <- fetch_code_param(params),
{:ok, redirect_uri} <- Config.fetch(config, :redirect_uri),
:ok <- maybe_check_state(session_params, params),
{:ok, token} <- grant_access_token(config, "authorization_code", code: code, redirect_uri: redirect_uri) do
fetch_user_with_strategy(config, token, strategy)
end
end
defp check_error_params(%{"error" => _} = params) do
message = params["error_description"] || params["error_reason"] || params["error"]
error = params["error"]
error_uri = params["error_uri"]
{:error, %CallbackError{message: message, error: error, error_uri: error_uri}}
end
defp check_error_params(_params), do: :ok
defp fetch_code_param(%{"code" => code}), do: {:ok, code}
defp fetch_code_param(params), do: {:error, MissingParamError.new("code", params)}
defp maybe_check_state(%{state: stored_state}, %{"state" => provided_state}) do
case Assent.constant_time_compare(stored_state, provided_state) do
true -> :ok
false -> {:error, CallbackCSRFError.new("state")}
end
end
defp maybe_check_state(%{state: _state}, params) do
{:error, MissingParamError.new("state", params)}
end
defp maybe_check_state(_session_params, _params), do: :ok
defp authentication_params(nil, config) do
with {:ok, client_id} <- Config.fetch(config, :client_id) do
headers = []
body = [client_id: client_id]
{:ok, headers, body}
end
end
defp authentication_params(:client_secret_basic, config) do
with {:ok, client_id} <- Config.fetch(config, :client_id),
{:ok, client_secret} <- Config.fetch(config, :client_secret) do
auth = Base.url_encode64("#{client_id}:#{client_secret}", padding: false)
headers = [{"authorization", "Basic #{auth}"}]
body = []
{:ok, headers, body}
end
end
defp authentication_params(:client_secret_post, config) do
with {:ok, client_id} <- Config.fetch(config, :client_id),
{:ok, client_secret} <- Config.fetch(config, :client_secret) do
headers = []
body = [client_id: client_id, client_secret: client_secret]
{:ok, headers, body}
end
end
defp authentication_params(:client_secret_jwt, config) do
alg = Config.get(config, :jwt_algorithm, "HS256")
with {:ok, client_secret} <- Config.fetch(config, :client_secret) do
jwt_authentication_params(alg, client_secret, config)
end
end
defp authentication_params(:private_key_jwt, config) do
alg = Config.get(config, :jwt_algorithm, "RS256")
with {:ok, pem} <- JWTAdapter.load_private_key(config),
{:ok, _private_key_id} <- Config.fetch(config, :private_key_id) do
jwt_authentication_params(alg, pem, config)
end
end
defp authentication_params(method, _config) do
{:error, "Invalid `:auth_method` #{method}"}
end
defp jwt_authentication_params(alg, secret, config) do
with {:ok, claims} <- jwt_claims(config),
{:ok, token} <- Helpers.sign_jwt(claims, alg, secret, config) do
headers = []
body = [client_assertion: token, client_assertion_type: "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"]
{:ok, headers, body}
end
end
defp jwt_claims(config) do
timestamp = :os.system_time(:second)
with {:ok, site} <- Config.fetch(config, :site),
{:ok, client_id} <- Config.fetch(config, :client_id) do
{:ok, %{
"iss" => client_id,
"sub" => client_id,
"aud" => site,
"iat" => timestamp,
"exp" => timestamp + 60
}}
end
end
@doc """
Grants an access token.
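
  For example, to exchange an authorization code for an access token, with
  `code` and `redirect_uri` taken from the callback request:

      {:ok, token} =
        Assent.Strategy.OAuth2.grant_access_token(config, "authorization_code",
          code: code,
          redirect_uri: redirect_uri)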
"""
@spec grant_access_token(Config.t(), binary(), Keyword.t()) :: {:ok, map()} | {:error, term()}
def grant_access_token(config, grant_type, params) do
auth_method = Config.get(config, :auth_method, nil)
token_url = Config.get(config, :token_url, "/oauth/token")
with {:ok, site} <- Config.fetch(config, :site),
{:ok, auth_headers, auth_body} <- authentication_params(auth_method, config) do
headers = [{"content-type", "application/x-www-form-urlencoded"}] ++ auth_headers
params = Keyword.merge(params, Keyword.put(auth_body, :grant_type, grant_type))
url = Helpers.to_url(site, token_url)
body = URI.encode_query(params)
:post
|> Helpers.request(url, body, headers, config)
|> Helpers.decode_response(config)
|> process_access_token_response()
end
end
defp process_access_token_response({:ok, %HTTPResponse{status: 200, body: %{"access_token" => _} = token}}), do: {:ok, token}
defp process_access_token_response(any), do: process_response(any)
defp process_response({:ok, %HTTPResponse{} = response}), do: {:error, RequestError.unexpected(response)}
defp process_response({:error, %HTTPResponse{} = response}), do: {:error, RequestError.invalid(response)}
defp process_response({:error, error}), do: {:error, error}
defp fetch_user_with_strategy(config, token, strategy) do
config
|> strategy.fetch_user(token)
|> case do
{:ok, user} -> {:ok, %{token: token, user: user}}
{:error, error} -> {:error, error}
end
end
@doc """
Refreshes the access token.
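
  For example, assuming `token` is a token map containing a `"refresh_token"`
  key:

      {:ok, new_token} = Assent.Strategy.OAuth2.refresh_access_token(config, token)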
"""
@spec refresh_access_token(Config.t(), map(), Keyword.t()) :: {:ok, map()} | {:error, term()}
def refresh_access_token(config, token, params \\ []) do
with {:ok, refresh_token} <- fetch_from_token(token, "refresh_token") do
grant_access_token(config, "refresh_token", Keyword.put(params, :refresh_token, refresh_token))
end
end
@doc """
Performs an HTTP request to the API using the access token.
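
  A usage sketch (the path below is illustrative):

      {:ok, response} =
        Assent.Strategy.OAuth2.request(config, token, :get, "/api/resource")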
"""
@spec request(Config.t(), map(), atom(), binary(), map() | Keyword.t(), [{binary(), binary()}]) :: {:ok, map()} | {:error, term()}
def request(config, token, method, url, params \\ [], headers \\ []) do
with {:ok, site} <- Config.fetch(config, :site),
{:ok, auth_headers} <- authorization_headers(config, token) do
req_headers = request_headers(method, auth_headers ++ headers)
req_body = request_body(method, params)
params = url_params(method, params)
url = Helpers.to_url(site, url, params)
method
|> Helpers.request(url, req_body, req_headers, config)
|> Helpers.decode_response(config)
end
end
defp request_headers(:post, headers), do: [{"content-type", "application/x-www-form-urlencoded"}] ++ headers
defp request_headers(_method, headers), do: headers
defp request_body(:post, params), do: URI.encode_query(params)
defp request_body(_method, _params), do: nil
defp url_params(:post, _params), do: []
defp url_params(_method, params), do: params
@doc """
Fetches user data with the access token.
Uses `request/6` to fetch the user data.
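
  For example:

      {:ok, user} = Assent.Strategy.OAuth2.fetch_user(config, token)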
"""
@spec fetch_user(Config.t(), map(), map() | Keyword.t()) :: {:ok, map()} | {:error, term()}
def fetch_user(config, token, params \\ []) do
with {:ok, user_url} <- Config.fetch(config, :user_url) do
config
|> request(token, :get, user_url, params)
|> process_user_response()
end
end
defp authorization_headers(config, token) do
type =
token
|> Map.get("token_type", "Bearer")
|> String.downcase()
authorization_headers(config, token, type)
end
defp authorization_headers(_config, token, "bearer") do
with {:ok, access_token} <- fetch_from_token(token, "access_token") do
{:ok, [{"authorization", "Bearer #{access_token}"}]}
end
end
defp authorization_headers(_config, _token, type) do
{:error, "Authorization with token type `#{type}` not supported"}
end
defp fetch_from_token(token, key) do
case Map.fetch(token, key) do
{:ok, value} -> {:ok, value}
:error -> {:error, "No `#{key}` in token map"}
end
end
defp process_user_response({:ok, %HTTPResponse{status: 200, body: user}}), do: {:ok, user}
defp process_user_response({:error, %HTTPResponse{status: 401}}), do: {:error, %RequestError{message: "Unauthorized token"}}
defp process_user_response(any), do: process_response(any)
end
defmodule Nebulex.Adapters.Local do
@moduledoc """
Adapter module for Local Generational Cache.
It uses [Shards](https://github.com/cabol/shards) as in-memory backend
(ETS tables are used internally).
## Features
* Support for generational cache – inspired by
[epocxy](https://github.com/duomark/epocxy).
* Support for Sharding – handled by `:shards`.
* Support for garbage collection via `Nebulex.Adapters.Local.Generation`.
* Support for transactions via the Erlang global name registration facility.
## Options
These options can be set through the config file:
* `:n_shards` - The number of partitions for each Cache generation table.
Defaults to `:erlang.system_info(:schedulers_online)`.
* `:n_generations` - Max number of Cache generations, defaults to `2`.
* `:read_concurrency` - Indicates whether the tables that `:shards`
creates use `:read_concurrency` or not (default: `true`).
* `:write_concurrency` - Indicates whether the tables that `:shards`
creates use `:write_concurrency` or not (default: `true`).
* `:gc_interval` - Interval time in seconds for garbage collection to run,
deleting the oldest generation and creating a new one. If this option is
not set, garbage collection is never executed, so new generations
must be created explicitly, e.g.: `MyCache.new_generation([])`.
## Example
`Nebulex.Cache` is the wrapper around the cache. We can define a
local cache as follows:
defmodule MyApp.LocalCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
end
Where the configuration for the cache must be in your application
environment, usually defined in your `config/config.exs`:
config :my_app, MyApp.LocalCache,
n_shards: 2,
gc_interval: 3600,
write_concurrency: true
For more information about the usage, check out `Nebulex.Cache`.
## Extended API
This adapter provides some additional functions to the `Nebulex.Cache` API.
Most of these functions are used internally by the adapter, but there is one
function which is indeed provided to be used from the Cache API, and it is
the function to create new generations: `new_generation/1`.
MyApp.LocalCache.new_generation
Another function that might be useful is `__metadata__`,
which is used to retrieve the cache metadata:
MyApp.LocalCache.__metadata__
The rest of the functions, as mentioned before, are for internal use.
## Queryable API
The adapter supports as query parameter the following values:
* `query` - `nil | :all_unexpired | :all_expired | :ets.match_spec()`
Internally, an entry is represented by the tuple `{key, val, vsn, exp}`,
which means the match pattern within the `:ets.match_spec()` must be
something like `{:"$1", :"$2", :"$3", :"$4"}`. In order to make query
building easier, you can use `Ex2ms` library.
## Examples
# built-in queries
MyCache.all()
MyCache.all(:all_unexpired)
MyCache.all(:all_expired)
# using a custom match spec (all values > 10)
spec = [{{:"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}]
MyCache.all(spec)
# using Ex2ms
import Ex2ms
spec =
fun do
{key, value, _version, _expire_at} when value > 10 -> {key, value}
end
MyCache.all(spec)
The `:return` option applies only to the built-in queries
(`nil | :all_unexpired | :all_expired`); if you are using a
custom `:ets.match_spec()`, the return value depends on it.
The same applies to the `stream` function.
"""
# Inherit default transaction implementation
use Nebulex.Adapter.Transaction
# Provide Cache Implementation
@behaviour Nebulex.Adapter
@behaviour Nebulex.Adapter.Queryable
alias Nebulex.Adapters.Local.Generation
alias Nebulex.Object
alias :shards_local, as: Local
## Adapter
@impl true
defmacro __before_compile__(env) do
cache = env.module
config = Module.get_attribute(cache, :config)
n_shards = Keyword.get(config, :n_shards, System.schedulers_online())
r_concurrency = Keyword.get(config, :read_concurrency, true)
w_concurrency = Keyword.get(config, :write_concurrency, true)
shards_sup_name = Module.concat([cache, ShardsSupervisor])
quote do
alias Nebulex.Adapters.Local.Generation
alias Nebulex.Adapters.Local.Metadata
def __metadata__, do: Metadata.get(__MODULE__)
def __state__, do: :shards_state.new(unquote(n_shards))
def __shards_sup_name__, do: unquote(shards_sup_name)
def __tab_opts__ do
[
n_shards: unquote(n_shards),
sup_name: unquote(shards_sup_name),
read_concurrency: unquote(r_concurrency),
write_concurrency: unquote(w_concurrency)
]
end
def new_generation(opts \\ []) do
Generation.new(__MODULE__, opts)
end
end
end
@impl true
def init(opts) do
cache = Keyword.fetch!(opts, :cache)
children = [
%{
id: cache.__shards_sup_name__,
start: {:shards_sup, :start_link, [cache.__shards_sup_name__]},
type: :supervisor
},
%{
id: Generation,
start: {Generation, :start_link, [cache, opts]}
}
]
{:ok, children}
end
@impl true
def get(cache, key, _opts) do
cache.__metadata__.generations
|> do_get(cache, key)
|> entry_to_object()
end
defp do_get([newest | _] = generations, cache, key) do
case fetch(cache, newest, key) do
nil ->
generations
|> retrieve(cache, key)
|> elem(1)
ret ->
ret
end
end
defp fetch(cache, gen, key, fun \\ &local_get/4) do
fun
|> apply([gen, key, nil, cache.__state__])
|> validate_ttl(gen, cache)
end
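  # Walks the older generations looking up the key; on a hit, the entry is
  # promoted (re-inserted) into the newer generation so subsequent reads find
  # it in the newest table.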
defp retrieve([newest | olders], cache, key) do
Enum.reduce_while(olders, {newest, nil}, fn gen, {newer, _} ->
case fetch(cache, gen, key, &local_pop/4) do
nil ->
{:cont, {gen, nil}}
{k, v, vsn, exp} ->
# make sure we take the old timestamp since it will get set to
# the default :infinity otherwise.
entry = {k, v, vsn, diff_epoch(exp)}
true = Local.insert(newer, entry, cache.__state__)
{:halt, {gen, entry}}
end
end)
end
@impl true
def get_many(cache, keys, _opts) do
Enum.reduce(keys, %{}, fn key, acc ->
if obj = get(cache, key, []),
do: Map.put(acc, key, obj),
else: acc
end)
end
@impl true
def set(cache, object, opts) do
opts
|> Keyword.get(:action, :set)
|> do_set(cache, object)
end
defp do_set(:set, cache, %Object{key: key, value: val, version: vsn, expire_at: exp}) do
local_set(cache, {key, val, vsn, exp})
end
defp do_set(:add, cache, %Object{key: key, value: val, version: vsn, expire_at: exp}) do
cache.__metadata__.generations
|> hd()
|> Local.insert_new({key, val, vsn, exp}, cache.__state__)
end
defp do_set(:replace, cache, %Object{key: key, value: val, version: vsn, expire_at: exp}) do
local_update(cache, key, [{2, val}, {3, vsn}, {4, exp}])
end
@impl true
def set_many(cache, objects, _opts) do
entries =
for %Object{key: key, value: value, version: version, expire_at: expire_at} <- objects,
value != nil do
{key, value, version, expire_at}
end
true = local_set(cache, entries)
:ok
rescue
_ -> {:error, for(o <- objects, do: o.key)}
end
@impl true
def delete(cache, key, _opts) do
Enum.each(cache.__metadata__.generations, &Local.delete(&1, key, cache.__state__))
end
@impl true
def take(cache, key, _opts) do
Enum.reduce_while(cache.__metadata__.generations, nil, fn gen, acc ->
case Local.take(gen, key, cache.__state__) do
[entry] -> {:halt, entry_to_object(entry)}
[] -> {:cont, acc}
end
end)
end
@impl true
def has_key?(cache, key) do
Enum.reduce_while(cache.__metadata__.generations, false, fn gen, acc ->
if has_key?(gen, key, now(), cache.__state__),
do: {:halt, true},
else: {:cont, acc}
end)
end
defp has_key?(gen, key, now, state) do
case Local.lookup_element(gen, key, 4, state) do
exp when not is_nil(exp) and exp <= now ->
true = Local.delete(gen, key, state)
false
_ ->
true
end
rescue
ArgumentError -> false
end
@impl true
def object_info(cache, key, attr) do
Enum.reduce_while(cache.__metadata__.generations, nil, fn gen, acc ->
case {fetch(cache, gen, key), attr} do
{{_, _, _, exp}, :ttl} ->
{:halt, Object.remaining_ttl(exp)}
{{_, _, vsn, _}, :version} ->
{:halt, vsn}
{nil, _} ->
{:cont, acc}
end
end)
end
@impl true
def expire(cache, key, ttl) do
expire_at = Object.expire_at(ttl)
case local_update(cache, key, {4, expire_at}) do
true -> expire_at || :infinity
false -> nil
end
end
@impl true
def update_counter(cache, key, incr, opts) do
exp =
opts
|> Keyword.get(:ttl)
|> Object.expire_at()
cache.__metadata__.generations
|> hd()
|> Local.update_counter(key, {2, incr}, {key, 0, nil, exp}, cache.__state__)
end
@impl true
def size(cache) do
Enum.reduce(cache.__metadata__.generations, 0, fn gen, acc ->
gen
|> Local.info(:size, cache.__state__)
|> Kernel.+(acc)
end)
end
@impl true
def flush(cache) do
:ok = Generation.flush(cache)
_ = cache.new_generation()
:ok
end
## Queryable
@impl true
def all(cache, query, opts) do
query = validate_match_spec(query, opts)
for gen <- cache.__metadata__.generations,
elems <- Local.select(gen, query, cache.__state__),
do: elems
end
@impl true
def stream(cache, query, opts) do
query
|> validate_match_spec(opts)
|> do_stream(cache, Keyword.get(opts, :page_size, 10))
end
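  # Streams entries page by page: selects up to `page_size` entries from the
  # newest generation, then falls through to each older generation once the
  # current one returns `:"$end_of_table"`.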
defp do_stream(match_spec, cache, page_size) do
Stream.resource(
fn ->
[newer | _] = generations = cache.__metadata__.generations
result = Local.select(newer, match_spec, page_size, cache.__state__)
{result, generations}
end,
fn
{:"$end_of_table", [_gen]} ->
{:halt, []}
{:"$end_of_table", [_gen | generations]} ->
result =
generations
|> hd()
|> Local.select(match_spec, page_size, cache.__state__)
{[], {result, generations}}
{{elements, cont}, [_ | _] = generations} ->
{elements, {Local.select(cont), generations}}
end,
& &1
)
end
## Transaction
@impl true
def transaction(cache, fun, opts) do
keys = Keyword.get(opts, :keys, [])
nodes = Keyword.get(opts, :nodes, [node()])
retries = Keyword.get(opts, :retries, :infinity)
do_transaction(cache, keys, nodes, retries, fun)
end
## Helpers
defp local_get(tab, key, default, state) do
case Local.lookup(tab, key, state) do
[] -> default
[entry] -> entry
end
end
defp local_pop(tab, key, default, state) do
case Local.take(tab, key, state) do
[] -> default
[entry] -> entry
end
end
defp local_set(cache, entries) do
cache.__metadata__.generations
|> hd()
|> Local.insert(entries, cache.__state__)
end
defp local_update(cache, key, updates) do
cache.__metadata__.generations
|> hd()
|> Local.update_element(key, updates, cache.__state__)
end
defp entry_to_object(nil), do: nil
defp entry_to_object({key, val, vsn, :infinity}) do
%Object{key: key, value: val, version: vsn}
end
defp entry_to_object({key, val, vsn, expire_at}) do
%Object{key: key, value: val, version: vsn, expire_at: expire_at}
end
defp validate_ttl(nil, _, _), do: nil
defp validate_ttl({_, _, _, nil} = entry, _, _), do: entry
defp validate_ttl({key, _, _, expire_at} = entry, gen, cache) do
if expire_at > now() do
entry
else
true = Local.delete(gen, key, cache.__state__)
nil
end
end
defp diff_epoch(ttl) when is_integer(ttl), do: ttl - now()
defp diff_epoch(_), do: :infinity
defp now, do: DateTime.to_unix(DateTime.utc_now())
defp validate_match_spec(spec, opts) when spec in [nil, :all_unexpired, :all_expired] do
[
{
{:"$1", :"$2", :"$3", :"$4"},
if(spec = comp_match_spec(spec), do: [spec], else: []),
ret_match_spec(opts)
}
]
end
defp validate_match_spec(spec, _opts) do
case Local.test_ms({nil, nil, nil, :infinity}, spec) do
{:ok, _result} ->
spec
{:error, _result} ->
raise Nebulex.QueryError, message: "invalid match spec", query: spec
end
end
defp comp_match_spec(nil),
do: nil
defp comp_match_spec(:all_unexpired),
do: {:orelse, {:==, :"$4", nil}, {:>, :"$4", now()}}
defp comp_match_spec(:all_expired),
do: {:not, comp_match_spec(:all_unexpired)}
defp ret_match_spec(opts) do
case Keyword.get(opts, :return, :key) do
:key -> [:"$1"]
:value -> [:"$2"]
:object -> [%Object{key: :"$1", value: :"$2", version: :"$3", expire_at: :"$4"}]
end
end
end
defmodule BertInt do
@moduledoc """
Binary Erlang Term encoding for internal node-to-node encoding.
"""
@spec encode!(any()) :: binary()
def encode!(term) do
term
|> :erlang.term_to_binary()
|> zip()
end
# Custom impl of :zlib.zip() for faster compression
defp zip(data, level \\ 1) do
z = :zlib.open()
bs =
try do
:zlib.deflateInit(z, level, :deflated, -15, 8, :default)
b = :zlib.deflate(z, data, :finish)
:zlib.deflateEnd(z)
b
after
:zlib.close(z)
end
:erlang.iolist_to_binary(bs)
end
@spec decode!(binary()) :: any()
def decode!(term) do
try do
:zlib.unzip(term)
rescue
ErlangError ->
term
end
|> :erlang.binary_to_term([:safe])
end
@doc """
decode! variant for decoding locally created files, can decode atoms.
"""
def decode_unsafe!(term) do
try do
:zlib.unzip(term)
rescue
ErlangError ->
term
end
|> :erlang.binary_to_term()
end
end
defmodule BertExt do
@spec encode!(any()) :: binary()
def encode!(term) do
:erlang.term_to_binary(term_to_binary(term))
end
defp term_to_binary(map) when is_map(map) do
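    # Reject structs: for a struct, Map.from_struct/1 drops the :__struct__
    # key, so the pinned match fails with a MatchError instead of silently
    # encoding the struct.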
^map = Map.from_struct(map)
map
|> Map.to_list()
|> Enum.map(fn {key, value} -> {key, term_to_binary(value)} end)
|> Enum.into(%{})
end
defp term_to_binary(list) when is_list(list) do
Enum.map(list, &term_to_binary(&1))
end
defp term_to_binary(tuple) when is_tuple(tuple) do
Tuple.to_list(tuple)
|> Enum.map(&term_to_binary(&1))
|> List.to_tuple()
end
defp term_to_binary(other) do
other
end
@spec decode!(binary()) :: any()
def decode!(term) do
:erlang.binary_to_term(term, [:safe])
end
end
defmodule ZBert do
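  @moduledoc """
  Streaming zlib wrapper around a BERT encoder module.

  A minimal usage sketch (the encoder module and term are illustrative):

      z = ZBert.init(BertExt)
      iodata = ZBert.encode!(z, {:hello, "world"})
  """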
require Record
Record.defrecord(:zbert, in_stream: nil, out_stream: nil, module: nil)
def init(mod) do
out = :zlib.open()
:ok = :zlib.deflateInit(out)
inc = :zlib.open()
:ok = :zlib.inflateInit(inc)
zbert(in_stream: inc, out_stream: out, module: mod)
end
def encode!(zbert(out_stream: str, module: mod), term) do
data = mod.encode!(term)
:zlib.deflate(str, data, :sync)
end
def decode!(zbert(in_stream: str, module: mod), data) do
:zlib.inflate(str, data)
|> mod.decode!()
end
end
defmodule XDR.HyperUInt do
@moduledoc """
This module manages the `Unsigned Hyper Integer` type based on the RFC4506 XDR Standard.
"""
@behaviour XDR.Declaration
alias XDR.Error.HyperUInt, as: HyperUIntError
defstruct [:datum]
@typedoc """
`XDR.HyperUInt` structure type specification.
"""
@type t :: %XDR.HyperUInt{datum: integer | binary}
@doc """
Create a new `XDR.HyperUInt` structure with the `datum` passed.
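
  ## Examples

      iex> XDR.HyperUInt.new(258963)
      %XDR.HyperUInt{datum: 258963}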
"""
@spec new(datum :: integer | binary) :: t
def new(datum), do: %XDR.HyperUInt{datum: datum}
@impl XDR.Declaration
@doc """
Encode a `XDR.HyperUInt` structure into a XDR format.
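
  ## Examples

      iex> XDR.HyperUInt.encode_xdr(%XDR.HyperUInt{datum: 5})
      {:ok, <<0, 0, 0, 0, 0, 0, 0, 5>>}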
"""
@spec encode_xdr(h_uint :: t) ::
{:ok, binary} | {:error, :not_integer | :exceed_upper_limit | :exceed_lower_limit}
def encode_xdr(%XDR.HyperUInt{datum: datum}) when not is_integer(datum),
do: {:error, :not_integer}
def encode_xdr(%XDR.HyperUInt{datum: datum}) when datum > 18_446_744_073_709_551_615,
do: {:error, :exceed_upper_limit}
def encode_xdr(%XDR.HyperUInt{datum: datum}) when datum < 0,
do: {:error, :exceed_lower_limit}
def encode_xdr(%XDR.HyperUInt{datum: datum}),
do: {:ok, <<datum::big-unsigned-integer-size(64)>>}
@impl XDR.Declaration
@doc """
Encode a `XDR.HyperUInt` structure into a XDR format.
If the `h_uint` is not valid, an exception is raised.
"""
@spec encode_xdr!(h_uint :: t) :: binary
def encode_xdr!(h_uint) do
case encode_xdr(h_uint) do
{:ok, binary} -> binary
{:error, reason} -> raise(HyperUIntError, reason)
end
end
@impl XDR.Declaration
@doc """
Decode the Unsigned Hyper Integer in XDR format to a `XDR.HyperUInt` structure.
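
  ## Examples

      iex> XDR.HyperUInt.decode_xdr(<<0, 0, 0, 0, 0, 0, 0, 5>>)
      {:ok, {%XDR.HyperUInt{datum: 5}, ""}}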
"""
@spec decode_xdr(bytes :: binary, h_uint :: any) :: {:ok, {t, binary}} | {:error, :not_binary}
def decode_xdr(bytes, h_uint \\ nil)
def decode_xdr(bytes, _h_uint) when not is_binary(bytes),
do: {:error, :not_binary}
def decode_xdr(<<hyper_uint::big-unsigned-integer-size(64), rest::binary>>, _h_uint),
do: {:ok, {new(hyper_uint), rest}}
@impl XDR.Declaration
@doc """
Decode the Unsigned Hyper Integer in XDR format to a `XDR.HyperUInt` structure.
If the binaries are not valid, an exception is raised.
"""
@spec decode_xdr!(bytes :: binary, h_uint :: any) :: {t, binary}
def decode_xdr!(bytes, h_uint \\ nil)
def decode_xdr!(bytes, h_uint) do
case decode_xdr(bytes, h_uint) do
{:ok, result} -> result
{:error, reason} -> raise(HyperUIntError, reason)
end
end
end
defmodule ExDns.Resource.Validation do
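  @moduledoc """
  Validation helpers for resource records represented as keyword lists.

  A hedged usage sketch (the record keys and target struct module are
  illustrative):

      [address: "192.0.2.1", ttl: "3600", class: :in]
      |> validate_ipv4(:address)
      |> validate_integer(:ttl)
      |> validate_class(:class, :in)
      |> structify_if_valid(ExDns.Resource.A)
  """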
def validate_ipv4(record, key) when is_list(record) do
address = String.to_charlist(record[key])
case :inet.parse_ipv4_address(address) do
{:ok, address} -> Keyword.put(record, key, address)
{:error, _} -> add_error(record, {address, "is not a valid IPv4 address"})
end
end
def validate_ipv6(record, key) when is_list(record) do
address = String.to_charlist(record[key])
case :inet.parse_ipv6_address(address) do
{:ok, address} -> Keyword.put(record, key, address)
{:error, _} -> add_error(record, {address, "is not a valid IPv6 address"})
end
end
def validate_integer(record, key) do
do_validate_integer(record, key, record[key])
end
defp do_validate_integer(record, key, value) when is_binary(value) do
case Integer.parse(value) do
{integer, _} -> Keyword.put(record, key, integer)
:error -> add_error(record, {value, "is not a valid integer"})
end
end
defp do_validate_integer(record, _key, value) when is_integer(value) do
record
end
def validate_class(record, key, class) when is_atom(class) do
if record[key] == class do
record
else
add_error(record, {record[key], "is not a valid class. Only IN class is supported."})
end
end
  # TODO: validate the email address by splitting at the first non-escaped "."
  def validate_email(record, _key) do
    record
  end
  # TODO: validate that the value is a domain name (an FQDN or a relative name)
  def validate_domain_name(record, _name) do
    record
  end
def add_error(record, message) do
errors = [message | Keyword.get(record, :errors, [])]
Keyword.put(record, :errors, errors)
end
def structify_if_valid(record, module) do
if Keyword.has_key?(record, :errors) do
{:error, {type_from_module(module), record}}
else
{:ok, struct(module, record)}
end
end
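  # Derives the resource type atom from the module name,
  # e.g. ExDns.Resource.A becomes :a.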
def type_from_module(module) do
module
|> Atom.to_string()
|> String.split(".")
|> Enum.reverse()
|> hd
|> String.downcase()
|> String.to_existing_atom()
end
end
defmodule Google.Protobuf.NullValue do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
@type t :: integer | :NULL_VALUE
field :NULL_VALUE, 0
end
defmodule Google.Protobuf.Struct.FieldsEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: Google.Protobuf.Value.t() | nil
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: Google.Protobuf.Value
def transform_module(), do: nil
end
defmodule Google.Protobuf.Struct do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
fields: %{String.t() => Google.Protobuf.Value.t() | nil}
}
defstruct [:fields]
field :fields, 1, repeated: true, type: Google.Protobuf.Struct.FieldsEntry, map: true
def transform_module(), do: nil
end
defmodule Google.Protobuf.Value do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
kind:
{:null_value, Google.Protobuf.NullValue.t()}
| {:number_value, float | :infinity | :negative_infinity | :nan}
| {:string_value, String.t()}
| {:bool_value, boolean}
| {:struct_value, Google.Protobuf.Struct.t() | nil}
| {:list_value, Google.Protobuf.ListValue.t() | nil}
}
defstruct [:kind]
oneof :kind, 0
field :null_value, 1,
type: Google.Protobuf.NullValue,
enum: true,
json_name: "nullValue",
oneof: 0
field :number_value, 2, type: :double, json_name: "numberValue", oneof: 0
field :string_value, 3, type: :string, json_name: "stringValue", oneof: 0
field :bool_value, 4, type: :bool, json_name: "boolValue", oneof: 0
field :struct_value, 5, type: Google.Protobuf.Struct, json_name: "structValue", oneof: 0
field :list_value, 6, type: Google.Protobuf.ListValue, json_name: "listValue", oneof: 0
def transform_module(), do: nil
end
defmodule Google.Protobuf.ListValue do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
values: [Google.Protobuf.Value.t()]
}
defstruct [:values]
field :values, 1, repeated: true, type: Google.Protobuf.Value
def transform_module(), do: nil
end
defmodule AWS.Support do
@moduledoc """
AWS Support
The AWS Support API reference is intended for programmers who need detailed
information about the AWS Support operations and data types.
This service enables you to manage your AWS Support cases programmatically. It
uses HTTP methods that return results in JSON format.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
The AWS Support service also exposes a set of [AWS Trusted Advisor](http://aws.amazon.com/premiumsupport/trustedadvisor/) features. You can
retrieve a list of checks and their descriptions, get check results, specify
checks to refresh, and get the refresh status of checks.
The following list describes the AWS Support case management operations:
* **Service names, issue categories, and available severity levels.**
The `DescribeServices` and `DescribeSeverityLevels` operations return AWS
service names, service codes, service categories, and problem severity levels.
You use these values when you call the `CreateCase` operation.
* **Case creation, case details, and case resolution.** The
`CreateCase`, `DescribeCases`, `DescribeAttachment`, and `ResolveCase`
operations create AWS Support cases, retrieve information about cases, and
resolve cases.
* **Case communication.** The `DescribeCommunications`,
`AddCommunicationToCase`, and `AddAttachmentsToSet` operations retrieve and add
communications and attachments to AWS Support cases.
The following list describes the operations available from the AWS Support
service for Trusted Advisor:
* `DescribeTrustedAdvisorChecks` returns the list of checks that run
against your AWS resources.
* Using the `checkId` for a specific check returned by
`DescribeTrustedAdvisorChecks`, you can call `DescribeTrustedAdvisorCheckResult`
to obtain the results for the check that you specified.
* `DescribeTrustedAdvisorCheckSummaries` returns summarized results
for one or more Trusted Advisor checks.
* `RefreshTrustedAdvisorCheck` requests that Trusted Advisor rerun a
specified check.
* `DescribeTrustedAdvisorCheckRefreshStatuses` reports the refresh
status of one or more checks.
For authentication of requests, AWS Support uses [Signature Version 4 Signing Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
See [About the AWS Support API](https://docs.aws.amazon.com/awssupport/latest/user/Welcome.html) in the
*AWS Support User Guide* for information about how to use this service to create
and manage your support cases, and how to call Trusted Advisor for results of
checks on your resources.
"""
@doc """
Adds one or more attachments to an attachment set.
An attachment set is a temporary container for attachments that you add to a
case or case communication. The set is available for 1 hour after it's created.
The `expiryTime` returned in the response is when the set expires.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def add_attachments_to_set(client, input, options \\ []) do
request(client, "AddAttachmentsToSet", input, options)
end
@doc """
Adds additional customer communication to an AWS Support case.
Use the `caseId` parameter to identify the case to which to add communication.
You can list a set of email addresses to copy on the communication by using the
`ccEmailAddresses` parameter. The `communicationBody` value contains the text of
the communication.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def add_communication_to_case(client, input, options \\ []) do
request(client, "AddCommunicationToCase", input, options)
end
@doc """
Creates a case in the AWS Support Center.
This operation is similar to how you create a case in the AWS Support Center
[Create Case](https://console.aws.amazon.com/support/home#/case/create) page. The AWS Support API doesn't support requesting service limit increases. You can
submit a service limit increase in the following ways:
* Submit a request from the AWS Support Center [Create
Case](https://console.aws.amazon.com/support/home#/case/create) page.
* Use the Service Quotas
[RequestServiceQuotaIncrease](https://docs.aws.amazon.com/servicequotas/2019-06-24/apireference/API_RequestServiceQuotaIncrease.html) operation.
A successful `CreateCase` request returns an AWS Support case number. You can
use the `DescribeCases` operation and specify the case number to get existing
AWS Support cases. After you create a case, use the `AddCommunicationToCase`
operation to add additional communication or attachments to an existing case.
The `caseId` is separate from the `displayId` that appears in the [AWS Support
Center](https://console.aws.amazon.com/support). Use the `DescribeCases`
operation to get the `displayId`.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
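
  A hedged example call (field names follow the AWS CreateCase API; the values
  are illustrative):

      input = %{
        "subject" => "Question about my account",
        "communicationBody" => "Details about the issue.",
        "serviceCode" => "general-info",
        "categoryCode" => "other",
        "severityCode" => "low"
      }

      {:ok, result, _response} = AWS.Support.create_case(client, input)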
"""
def create_case(client, input, options \\ []) do
request(client, "CreateCase", input, options)
end
@doc """
Returns the attachment that has the specified ID.
Attachments can include screenshots, error logs, or other files that describe
your issue. Attachment IDs are generated by the case management system when you
add an attachment to a case or case communication. Attachment IDs are returned
in the `AttachmentDetails` objects that are returned by the
`DescribeCommunications` operation.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_attachment(client, input, options \\ []) do
request(client, "DescribeAttachment", input, options)
end
@doc """
Returns a list of cases that you specify by passing one or more case IDs.
You can use the `afterTime` and `beforeTime` parameters to filter the cases by
date. You can set values for the `includeResolvedCases` and
`includeCommunications` parameters to specify how much information to return.
The response returns the following in JSON format:
* One or more
[CaseDetails](https://docs.aws.amazon.com/awssupport/latest/APIReference/API_CaseDetails.html) data types.
* One or more `nextToken` values, which specify where to paginate
the returned records represented by the `CaseDetails` objects.
Case data is available for 12 months after creation. If a case was created more
than 12 months ago, a request might return an error.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS
Support](http://aws.amazon.com/premiumsupport/).
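
  A hedged example call (the case ID is illustrative):

      input = %{"caseIdList" => ["case-12345678910-2013-c4c1d2bf33c5cf47"]}
      {:ok, result, _response} = AWS.Support.describe_cases(client, input)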
"""
def describe_cases(client, input, options \\ []) do
request(client, "DescribeCases", input, options)
end
@doc """
Returns communications and attachments for one or more support cases.
Use the `afterTime` and `beforeTime` parameters to filter by date. You can use
the `caseId` parameter to restrict the results to a specific case.
Case data is available for 12 months after creation. If a case was created more
than 12 months ago, a request for data might cause an error.
You can use the `maxResults` and `nextToken` parameters to control the
pagination of the results. Set `maxResults` to the number of cases that you want
to display on each page, and use `nextToken` to specify the resumption of
pagination.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_communications(client, input, options \\ []) do
request(client, "DescribeCommunications", input, options)
end
@doc """
Returns the current list of AWS services and a list of service categories for
each service.
You then use service names and categories in your `CreateCase` requests. Each
AWS service has its own set of categories.
The service codes and category codes correspond to the values that appear in the
**Service** and **Category** lists on the AWS Support Center [Create Case](https://console.aws.amazon.com/support/home#/case/create) page. The values
in those fields don't necessarily match the service codes and categories
returned by the `DescribeServices` operation. Always use the service codes and
categories that the `DescribeServices` operation returns, so that you have the
most recent set of service and category codes.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_services(client, input, options \\ []) do
request(client, "DescribeServices", input, options)
end
@doc """
Returns the list of severity levels that you can assign to an AWS Support case.
The severity level for a case is also a field in the `CaseDetails` data type
that you include for a `CreateCase` request.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_severity_levels(client, input, options \\ []) do
request(client, "DescribeSeverityLevels", input, options)
end
@doc """
Returns the refresh status of the AWS Trusted Advisor checks that have the
specified check IDs.
You can get the check IDs by calling the `DescribeTrustedAdvisorChecks`
operation.
Some checks are refreshed automatically, and you can't return their refresh
statuses by using the `DescribeTrustedAdvisorCheckRefreshStatuses` operation. If
you call this operation for these checks, you might see an
`InvalidParameterValue` error.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_trusted_advisor_check_refresh_statuses(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorCheckRefreshStatuses", input, options)
end
@doc """
Returns the results of the AWS Trusted Advisor check that has the specified
check ID.
You can get the check IDs by calling the `DescribeTrustedAdvisorChecks`
operation.
The response contains a `TrustedAdvisorCheckResult` object, which contains these
three objects:
* `TrustedAdvisorCategorySpecificSummary`
* `TrustedAdvisorResourceDetail`
* `TrustedAdvisorResourcesSummary`
In addition, the response contains these fields:
* **status** - The alert status of the check: "ok" (green),
"warning" (yellow), "error" (red), or "not_available".
* **timestamp** - The time of the last refresh of the check.
* **checkId** - The unique identifier for the check.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_trusted_advisor_check_result(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorCheckResult", input, options)
end
@doc """
Returns the results for the AWS Trusted Advisor check summaries for the check
IDs that you specified.
You can get the check IDs by calling the `DescribeTrustedAdvisorChecks`
operation.
The response contains an array of `TrustedAdvisorCheckSummary` objects.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_trusted_advisor_check_summaries(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorCheckSummaries", input, options)
end
@doc """
Returns information about all available AWS Trusted Advisor checks, including
the name, ID, category, description, and metadata.
You must specify a language code. The AWS Support API currently supports English
("en") and Japanese ("ja"). The response contains a
`TrustedAdvisorCheckDescription` object for each check. You must set the AWS
Region to us-east-1.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def describe_trusted_advisor_checks(client, input, options \\ []) do
request(client, "DescribeTrustedAdvisorChecks", input, options)
end
@doc """
Refreshes the AWS Trusted Advisor check that you specify using the check ID.
You can get the check IDs by calling the `DescribeTrustedAdvisorChecks`
operation.
Some checks are refreshed automatically. If you call the
`RefreshTrustedAdvisorCheck` operation to refresh them, you might see the
`InvalidParameterValue` error.
The response contains a `TrustedAdvisorCheckRefreshStatus` object.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def refresh_trusted_advisor_check(client, input, options \\ []) do
request(client, "RefreshTrustedAdvisorCheck", input, options)
end
@doc """
Resolves a support case.
This operation takes a `caseId` and returns the initial and final state of the
case.
You must have a Business or Enterprise support plan to use the AWS
Support API.
If you call the AWS Support API from an account that does not have
a Business or Enterprise support plan, the `SubscriptionRequiredException` error
message appears. For information about changing your support plan, see [AWS Support](http://aws.amazon.com/premiumsupport/).
"""
def resolve_case(client, input, options \\ []) do
request(client, "ResolveCase", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "support"}
host = build_host("support", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSSupport_20130415.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
import Kernel, except: [to_binary: 1]
defprotocol Binary.Chars do
@moduledoc %B"""
The Binary.Chars protocol is responsible for
converting a structure to a Binary (only if applicable).
The only function required to be implemented is
`to_binary`, which does the conversion.
The `to_binary` function, automatically imported
by Kernel, invokes this protocol. String
interpolation also invokes to_binary in its
arguments. For example, `"foo#{bar}"` is the same
as `"foo" <> to_binary(bar)`.
"""
@only [BitString, List, Number, Atom, Record]
def to_binary(thing)
end
defimpl Binary.Chars, for: Atom do
@doc """
Converts the atom literally to a binary, except
`nil`, which is converted to an empty string.
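
  ## Examples

      iex> to_binary(:foo)
      "foo"
      iex> to_binary(nil)
      ""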
"""
def to_binary(nil) do
""
end
def to_binary(atom) do
atom_to_binary(atom, :utf8)
end
end
defimpl Binary.Chars, for: BitString do
@doc """
Simply returns the binary itself.
"""
def to_binary(thing) when is_binary(thing) do
thing
end
end
defimpl Binary.Chars, for: List do
@doc """
Considers the list to be an iolist and converts it
to a binary. This allows a list of binaries, a
charlist, or a mix of both, to be converted
successfully.
## Examples
iex> to_binary('foo')
"foo"
iex> to_binary(["foo", 'bar'])
"foobar"
"""
def to_binary(thing) do
try do
iolist_to_binary(thing)
rescue
ArgumentError ->
raise Protocol.UndefinedError,
protocol: __MODULE__,
structure: thing,
extra: "Only iolists are supported"
end
end
end
defimpl Binary.Chars, for: Number do
@doc """
Simply converts the number (integer or a float) to a binary.
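
  ## Examples

      iex> to_binary(100)
      "100"
      iex> to_binary(1.0)
      "1.0"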
"""
@digits 20
@limit :math.pow(10, @digits)
def to_binary(thing) when is_integer(thing) do
integer_to_binary(thing)
end
def to_binary(thing) when thing > @limit do
float_to_binary(thing, scientific: @digits)
end
def to_binary(thing) do
float_to_binary(thing, compact: true, decimals: @digits)
end
end