Datasets

Modalities: Text
Formats: parquet
Libraries: Datasets, pandas
The dataset has three string columns (lengths as reported by the dataset viewer):

    prompt       94 to 42.6k chars   source-code context that ends just before an API call
    completion   6 to 120 chars      the call expression that completes the prompt
    api          14 to 68 chars      the fully qualified name of the completed API
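Given the parquet format and the listed libraries, the rows can be loaded with either the Datasets library or pandas; a minimal sketch with Datasets follows. The repository id is a placeholder, since the card above does not name the dataset.

    from datasets import load_dataset

    # "user/megengine-api-completion" is a hypothetical repo id; the card
    # does not state the real one, so substitute the actual path.
    ds = load_dataset("user/megengine-api-completion", split="train")

    row = ds[0]
    print(row["prompt"][-60:])  # code context, cut just before an API call
    print(row["completion"])    # e.g. F.arange(1, logits.shape[2] + 1)
    print(row["api"])           # e.g. megengine.functional.arange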
Example rows. Each row pairs a prompt (a Python source prefix, cut just before an API call) with the completion expression and the fully qualified api it exercises. Consecutive rows extend the same source file, so each file's context is shown once below, with the per-row completion/api pairs listed after it.

Rows 1-3 step through the start of a MegEngine detection-loss file (the focal-loss setup). The context below is the longest of the three prompts with its completion filled in:

    # -*- coding: utf-8 -*-
    # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
    #
    # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
    #
    # Unless required by applicable law or agreed to in writing,
    # software distributed under the License is distributed on an
    # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
    # either express or implied.
    import megengine.functional as F
    from megengine.core import Tensor

    from official.vision.detection import layers


    def get_focal_loss(
        logits: Tensor,
        labels: Tensor,
        ignore_label: int = -1,
        background: int = 0,
        alpha: float = 0.5,
        gamma: float = 0,
        norm_type: str = "fg",
    ) -> Tensor:
        r"""Focal Loss for Dense Object Detection:
        <https://arxiv.org/pdf/1708.02002.pdf>

        .. math::

            FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)

        Args:
            logits (Tensor): the predicted logits with the shape of :math:`(B, A, C)`
            labels (Tensor): the assigned labels of boxes with shape of :math:`(B, A)`
            ignore_label (int): the value of ignore class. Default: -1
            background (int): the value of background class. Default: 0
            alpha (float): parameter to mitigate class imbalance. Default: 0.5
            gamma (float): parameter to mitigate easy/hard loss imbalance. Default: 0
            norm_type (str): currently supported: "fg", "none":
                "fg": loss will be normalized by the number of foreground samples
                "none": no normalization

        Returns:
            the calculated focal loss.
        """
        class_range = F.arange(1, logits.shape[2] + 1)
        labels = F.add_axis(labels, axis=2)
        scores = F.sigmoid(logits)

completion / api pairs for rows 1-3:

    completion                          api
    F.arange(1, logits.shape[2] + 1)    megengine.functional.arange
    F.add_axis(labels, axis=2)          megengine.functional.add_axis
    F.sigmoid(logits)                   megengine.functional.sigmoid
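The first three completions set up a broadcasted one-hot comparison: class_range has shape (C,), the expanded labels have shape (B, A, 1), so labels == class_range (used just below) yields a (B, A, C) boolean one-hot of each anchor's assigned class. A small NumPy sketch of the same trick, purely for illustration:

    import numpy as np

    B, A, C = 1, 2, 4
    labels = np.array([[2, 0]])                   # (B, A); 0 is background
    class_range = np.arange(1, C + 1)             # (C,), classes 1..C
    one_hot = labels[:, :, None] == class_range   # (B, A, C) via broadcasting
    print(one_hot.astype(int))
    # [[[0 1 0 0]     anchor 0: assigned class 2
    #   [0 0 0 0]]]   anchor 1: background matches no foreground class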
Rows 4-5 continue the same file: the rest of get_focal_loss, both smooth-l1 functions, and the start of softmax_loss. The last prompt is cut mid-expression, inside the F.log call:

        pos_part = (1 - scores) ** gamma * layers.logsigmoid(logits)
        neg_part = scores ** gamma * layers.logsigmoid(-logits)

        pos_loss = -(labels == class_range) * pos_part * alpha
        neg_loss = (
            -(labels != class_range) * (labels != ignore_label) * neg_part * (1 - alpha)
        )
        loss = (pos_loss + neg_loss).sum()

        if norm_type == "fg":
            fg_mask = (labels != background) * (labels != ignore_label)
            return loss / F.maximum(fg_mask.sum(), 1)
        elif norm_type == "none":
            return loss
        else:
            raise NotImplementedError


    def get_smooth_l1_loss(
        pred_bbox: Tensor,
        gt_bbox: Tensor,
        labels: Tensor,
        beta: int = 1,
        background: int = 0,
        ignore_label: int = -1,
        norm_type: str = "fg",
    ) -> Tensor:
        r"""Smooth l1 loss used in RetinaNet.

        Args:
            pred_bbox (Tensor): the predicted bbox with the shape of :math:`(B, A, 4)`
            gt_bbox (Tensor): the ground-truth bbox with the shape of :math:`(B, A, 4)`
            labels (Tensor): the assigned labels of boxes with shape of :math:`(B, A)`
            beta (int): the parameter of smooth l1 loss. Default: 1
            background (int): the value of background class. Default: 0
            ignore_label (int): the value of ignore class. Default: -1
            norm_type (str): currently supported: "fg", "all", "none":
                "fg": loss will be normalized by the number of foreground samples
                "all": loss will be normalized by the number of all samples
                "none": no normalization

        Returns:
            the calculated smooth l1 loss.
        """
        pred_bbox = pred_bbox.reshape(-1, 4)
        gt_bbox = gt_bbox.reshape(-1, 4)
        labels = labels.reshape(-1)
        fg_mask = (labels != background) * (labels != ignore_label)
        loss = get_smooth_l1_base(pred_bbox, gt_bbox, beta)
        loss = (loss.sum(axis=1) * fg_mask).sum()
        if norm_type == "fg":
            loss = loss / F.maximum(fg_mask.sum(), 1)
        elif norm_type == "all":
            all_mask = labels != ignore_label
            loss = loss / F.maximum(all_mask.sum(), 1)
        elif norm_type == "none":
            return loss
        else:
            raise NotImplementedError
        return loss


    def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor, beta: float) -> Tensor:
        r"""
        Args:
            pred_bbox (Tensor): the predicted bbox with the shape of :math:`(N, 4)`
            gt_bbox (Tensor): the ground-truth bbox with the shape of :math:`(N, 4)`
            beta (int): the parameter of smooth l1 loss.

        Returns:
            the calculated smooth l1 loss.
        """
        x = pred_bbox - gt_bbox
        abs_x = F.abs(x)
        if beta < 1e-5:
            loss = abs_x
        else:
            in_loss = 0.5 * x ** 2 / beta
            out_loss = abs_x - 0.5 * beta
            # FIXME: F.where cannot handle 0-shape tensor yet
            # loss = F.where(abs_x < beta, in_loss, out_loss)
            in_mask = abs_x < beta
            loss = in_loss * in_mask + out_loss * (1 - in_mask)
        return loss


    def softmax_loss(scores: Tensor, labels: Tensor, ignore_label: int = -1) -> Tensor:
        max_scores = F.zero_grad(scores.max(axis=1, keepdims=True))
        scores -= max_scores
        log_prob = scores - F.log(

completion / api pairs for rows 4-5:

    completion      api
    F.abs(x)        megengine.functional.abs
    F.exp(scores)   megengine.functional.exp
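The masked-select workaround in get_smooth_l1_base computes the standard smooth-l1 (Huber-style) piecewise form without F.where. For reference, with x = pred_bbox - gt_bbox:

    \mathrm{smooth\_l1}(x) =
    \begin{cases}
        0.5\, x^2 / \beta & \text{if } |x| < \beta \\
        |x| - 0.5\, \beta & \text{otherwise}
    \end{cases}

Multiplying by the boolean mask and its complement, in_loss * in_mask + out_loss * (1 - in_mask), selects between the two branches elementwise, which sidesteps the 0-shape limitation of F.where noted in the FIXME.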
Row 6 draws on a second source file, a parameterized pytest suite for the RepVGGBlock from basecls:

    #!/usr/bin/env python3
    # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
    import megengine as mge
    import megengine.module as M
    import numpy as np
    import pytest

    from basecls.models.repvgg import RepVGGBlock


    @pytest.mark.parametrize("w_in", [32, 64])
    @pytest.mark.parametrize("w_out", [64])
    @pytest.mark.parametrize("stride", [1, 2])
    @pytest.mark.parametrize("groups", [1, 2, 4])
    @pytest.mark.parametrize("se_r", [0.0, 0.25])
    @pytest.mark.parametrize("act_name", ["relu"])
    def test_block(w_in, w_out, stride, groups, se_r, act_name):
        m = RepVGGBlock(w_in, w_out, stride, groups, se_r, act_name, deploy=False)
        assert isinstance(m, M.Module)
        m.eval()
        x = mge.random.uniform(size=(2, w_in, 8, 8))

completion / api pair for row 6:

    completion                                  api
    mge.random.uniform(size=(2, w_in, 8, 8))    megengine.random.uniform
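The prompt stops right after the input tensor is built. As a purely hypothetical continuation (not part of the dataset), the test body would plausibly run the block and assert the output shape; the 8 // stride spatial size assumes RepVGGBlock's usual 3x3, padding-1 convolutions:

        # hypothetical next lines of test_block
        y = m(x)
        assert y.shape == (2, w_out, 8 // stride, 8 // stride)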
Rows 7-13 move to a third source file, a from-scratch GRU/LSTM implementation on top of MegEngine, and walk through GRUCell:

    import math

    import numpy as np
    import megengine as mge
    import megengine.functional as F
    import megengine.module as M


    # ================================= GRU Implementation =================================
    class GRUCell(M.Module):
        """
        An implementation of GRUCell.
        """

        def __init__(self, input_size, hidden_size, bias=True):
            super().__init__()
            self.input_size = input_size
            self.hidden_size = hidden_size
            self.bias = bias
            self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
            self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
            self.reset_parameters()

        def reset_parameters(self):
            std = 1.0 / math.sqrt(self.hidden_size)
            for w in self.parameters():
                M.init.uniform_(w, -std, std)

        def forward(self, x, hidden):
            x = F.reshape(x, (-1, x.shape[1]))
            gate_x = self.ih(x)
            gate_h = self.hh(hidden)
            i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
            h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
            resetgate = F.sigmoid(i_r + h_r)
            inputgate = F.sigmoid(i_i + h_i)

completion / api pairs for rows 7-13:

    completion                                          api
    M.Linear(input_size, 3 * hidden_size, bias=bias)    megengine.module.Linear
    M.Linear(hidden_size, 3 * hidden_size, bias=bias)   megengine.module.Linear
    F.reshape(x, (-1, x.shape[1]))                      megengine.functional.reshape
    F.split(gate_x, 3, axis=1)                          megengine.functional.split
    F.split(gate_h, 3, axis=1)                          megengine.functional.split
    F.sigmoid(i_r + h_r)                                megengine.functional.sigmoid
    F.sigmoid(i_i + h_i)                                megengine.functional.sigmoid
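For reference, the resetgate/inputgate lines above, together with the newgate and hy lines that open the next batch of rows, implement the standard GRU update (textbook background, with bias terms folded into the Linear layers; resetgate is r_t, inputgate is z_t, newgate is n_t):

    r_t = \sigma(W_{ir} x_t + W_{hr} h_{t-1})
    z_t = \sigma(W_{iz} x_t + W_{hz} h_{t-1})
    n_t = \tanh(W_{in} x_t + r_t \odot (W_{hn} h_{t-1}))
    h_t = n_t + z_t \odot (h_{t-1} - n_t) = (1 - z_t) \odot n_t + z_t \odot h_{t-1}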
Rows 14-21 continue the file: the rest of GRUCell.forward, the multi-layer GRU module, and the first half of an LSTMCell:

            newgate = F.tanh(i_n + (resetgate * h_n))
            hy = newgate + inputgate * (hidden - newgate)
            return hy


    class GRU(M.Module):
        """
        An implementation of GRUModule.
        """

        def __init__(
            self,
            input_size,
            hidden_size,
            num_layers,
            bias=True,
            batch_first=False,
            dropout=0,
        ):
            super().__init__()
            self.input_size = input_size
            self.hidden_size = hidden_size
            self.num_layers = num_layers
            self.bias = bias
            self.batch_first = batch_first
            self.dropout = dropout
            self.rnn_cell_list = []
            self.rnn_cell_list.append(
                GRUCell(self.input_size, self.hidden_size, self.bias)
            )
            for l in range(1, self.num_layers):
                self.rnn_cell_list.append(
                    GRUCell(self.hidden_size, self.hidden_size, self.bias)
                )

        def forward(self, input, hx=None):
            if hx is None:
                batch = input.shape[0] if self.batch_first else input.shape[1]
                h0 = F.zeros((self.num_layers, batch, self.hidden_size))
            else:
                h0 = hx
            outs = []
            hidden = list()
            for layer in range(self.num_layers):
                hidden.append(h0[layer, :, :])
            length = input.shape[1] if self.batch_first else input.shape[0]
            for t in range(length):
                for layer in range(self.num_layers):
                    if layer == 0:
                        if self.batch_first:
                            hidden_l = self.rnn_cell_list[layer](
                                input[:, t, :], hidden[layer]
                            )
                        else:
                            hidden_l = self.rnn_cell_list[layer](
                                input[t, :, :], hidden[layer]
                            )
                    else:
                        hidden_l = self.rnn_cell_list[layer](
                            hidden[layer - 1], hidden[layer]
                        )
                    # note: "is not" compares identity and only works here via
                    # CPython's small-int caching; "!=" is the intended test
                    if self.dropout and (layer is not self.num_layers - 1):
                        hidden_l = F.dropout(hidden_l, self.dropout)
                    hidden[layer] = hidden_l
                outs.append(hidden_l)
            if self.batch_first:
                output = F.stack(outs, axis=1)
            else:
                output = F.stack(outs, axis=0)
            return output


    # ================================= LSTM Implementation =================================
    class LSTMCell(M.Module):
        """
        An implementation of LSTMCell.
        """

        def __init__(self, input_size, hidden_size, bias=True):
            super().__init__()
            self.input_size = input_size
            self.hidden_size = hidden_size
            self.bias = bias
            self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
            self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
            self.reset_parameters()

        def reset_parameters(self):
            std = 1.0 / math.sqrt(self.hidden_size)
            for w in self.parameters():
                M.init.uniform_(w, -std, std)

        def forward(self, x, hidden):
            hx, cx = hidden
            x = F.reshape(x, (-1, x.shape[1]))
            gates = self.x2h(x) + self.h2h(hx)
            ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
            ingate = F.sigmoid(ingate)
            forgetgate = F.sigmoid(forgetgate)
            cellgate = F.tanh(cellgate)
            outgate = F.sigmoid(outgate)

completion / api pairs for rows 14-21:

    completion                                          api
    M.Linear(input_size, 4 * hidden_size, bias=bias)    megengine.module.Linear
    M.Linear(hidden_size, 4 * hidden_size, bias=bias)   megengine.module.Linear
    F.reshape(x, (-1, x.shape[1]))                      megengine.functional.reshape
    F.split(gates, 4, axis=1)                           megengine.functional.split
    F.sigmoid(ingate)                                   megengine.functional.sigmoid
    F.sigmoid(forgetgate)                               megengine.functional.sigmoid
    F.tanh(cellgate)                                    megengine.functional.tanh
    F.sigmoid(outgate)                                  megengine.functional.sigmoid
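The four gates above feed the standard LSTM state update, which the remaining rows begin to assemble (textbook form, biases folded into the Linear layers; i, f, g, o are ingate, forgetgate, cellgate, outgate):

    i_t = \sigma(W_{xi} x_t + W_{hi} h_{t-1})
    f_t = \sigma(W_{xf} x_t + W_{hf} h_{t-1})
    g_t = \tanh(W_{xg} x_t + W_{hg} h_{t-1})
    o_t = \sigma(W_{xo} x_t + W_{ho} h_{t-1})
    c_t = f_t \odot c_{t-1} + i_t \odot g_t
    h_t = o_t \odot \tanh(c_t)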
Rows 22-26 revisit earlier cut points in the same file; all of the surrounding code already appears above:

    cut point                                         completion                                             api
    GRUCell.reset_parameters, inside the for loop     M.init.uniform_(w, -std, std)                          megengine.module.init.uniform_
    GRU.forward, h0 = ...                             F.zeros((self.num_layers, batch, self.hidden_size))    megengine.functional.zeros
    GRU.forward, output = ... (batch_first branch)    F.stack(outs, axis=1)                                  megengine.functional.stack
    GRU.forward, output = ... (else branch)           F.stack(outs, axis=0)                                  megengine.functional.stack
    LSTMCell.reset_parameters, inside the for loop    M.init.uniform_(w, -std, std)                          megengine.module.init.uniform_
Rows 27-28 complete the cell-state update in LSTMCell.forward; the excerpt ends here, mid-method:

            cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)

completion / api pairs for rows 27-28:

    completion                 api
    F.mul(cx, forgetgate)      megengine.functional.mul
    F.mul(ingate, cellgate)    megengine.functional.mul
megengine.functional.mul
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate,
F.tanh(cy)
megengine.functional.tanh
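Annotation: taken together, the three completions above implement the textbook LSTM state update c_t = f * c_{t-1} + i * g and h_t = o * tanh(c_t). A minimal numeric sketch with made-up batch and hidden sizes:

import numpy as np
import megengine as mge
import megengine.functional as F

batch, hidden = 4, 8  # made-up sizes

def rand():
    return mge.tensor(np.random.randn(batch, hidden).astype("float32"))

cx = rand()                                           # previous cell state
ingate, forgetgate = F.sigmoid(rand()), F.sigmoid(rand())
cellgate, outgate = F.tanh(rand()), F.sigmoid(rand())

cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)  # c_t = f*c_{t-1} + i*g
hy = F.mul(outgate, F.tanh(cy))                       # h_t = o * tanh(c_t)
print(hy.shape)  # (4, 8)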
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. 
""" def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate, F.tanh(cy)) return (hy, cy) class LSTM(M.Module): """ An implementation of LSTMModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append( LSTMCell(self.input_size, self.hidden_size, self.bias) ) for l in range(1, self.num_layers): self.rnn_cell_list.append( LSTMCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 =
F.zeros((self.num_layers, batch, self.hidden_size))
megengine.functional.zeros
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. 
""" def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate, F.tanh(cy)) return (hy, cy) class LSTM(M.Module): """ An implementation of LSTMModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append( LSTMCell(self.input_size, self.hidden_size, self.bias) ) for l in range(1, self.num_layers): self.rnn_cell_list.append( LSTMCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) c0 =
F.zeros((self.num_layers, batch, self.hidden_size))
megengine.functional.zeros
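Annotation: when no initial state is passed, both `h0` and `c0` default to all-zero tensors of shape `(num_layers, batch, hidden_size)`, one slice per stacked layer. Sketch with assumed sizes:

import megengine.functional as F

num_layers, batch, hidden_size = 2, 4, 8  # assumed sizes
h0 = F.zeros((num_layers, batch, hidden_size))
c0 = F.zeros((num_layers, batch, hidden_size))
print(h0.shape, c0.shape)  # (2, 4, 8) each; hidden[layer] indexes the first axis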
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. 
""" def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate, F.tanh(cy)) return (hy, cy) class LSTM(M.Module): """ An implementation of LSTMModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append( LSTMCell(self.input_size, self.hidden_size, self.bias) ) for l in range(1, self.num_layers): self.rnn_cell_list.append( LSTMCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) c0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx[0] c0 = hx[1] outs = [] hidden = list() for layer in range(self.num_layers): hidden.append((h0[layer, :, :], c0[layer, :, :])) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: inp = input[:, t, :] if self.batch_first else input[t, :, :] hidden_l = self.rnn_cell_list[layer]( inp, (hidden[layer][0], hidden[layer][1]) ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1]) ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = ( F.dropout(hidden_l[0], self.dropout), F.dropout(hidden_l[1], self.dropout), ) hidden[layer] = hidden_l outs.append(hidden_l[0]) if self.batch_first: output =
F.stack(outs, axis=1)
megengine.functional.stack
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. 
""" def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate, F.tanh(cy)) return (hy, cy) class LSTM(M.Module): """ An implementation of LSTMModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append( LSTMCell(self.input_size, self.hidden_size, self.bias) ) for l in range(1, self.num_layers): self.rnn_cell_list.append( LSTMCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) c0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx[0] c0 = hx[1] outs = [] hidden = list() for layer in range(self.num_layers): hidden.append((h0[layer, :, :], c0[layer, :, :])) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: inp = input[:, t, :] if self.batch_first else input[t, :, :] hidden_l = self.rnn_cell_list[layer]( inp, (hidden[layer][0], hidden[layer][1]) ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1]) ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = ( F.dropout(hidden_l[0], self.dropout), F.dropout(hidden_l[1], self.dropout), ) hidden[layer] = hidden_l outs.append(hidden_l[0]) if self.batch_first: output = F.stack(outs, axis=1) else: output =
F.stack(outs, axis=0)
megengine.functional.stack
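Annotation: the per-timestep hidden states are collected in a Python list and stacked into one tensor at the end; axis 1 recreates the `(batch, time, feature)` layout for `batch_first`, axis 0 the time-major layout. Sketch:

import numpy as np
import megengine as mge
import megengine.functional as F

outs = [mge.tensor(np.random.randn(4, 8).astype("float32")) for _ in range(5)]
print(F.stack(outs, axis=1).shape)  # (4, 5, 8): batch_first output
print(F.stack(outs, axis=0).shape)  # (5, 4, 8): time-major output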
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l =
F.dropout(hidden_l, self.dropout)
megengine.functional.dropout
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. 
""" def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate, F.tanh(cy)) return (hy, cy) class LSTM(M.Module): """ An implementation of LSTMModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append( LSTMCell(self.input_size, self.hidden_size, self.bias) ) for l in range(1, self.num_layers): self.rnn_cell_list.append( LSTMCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) c0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx[0] c0 = hx[1] outs = [] hidden = list() for layer in range(self.num_layers): hidden.append((h0[layer, :, :], c0[layer, :, :])) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: inp = input[:, t, :] if self.batch_first else input[t, :, :] hidden_l = self.rnn_cell_list[layer]( inp, (hidden[layer][0], hidden[layer][1]) ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1]) ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = (
F.dropout(hidden_l[0], self.dropout)
megengine.functional.dropout
import math import numpy as np import megengine as mge import megengine.functional as F import megengine.module as M # ================================= GRU Implementation ========================================================== class GRUCell(M.Module): """ An implementation of GRUCell. """ def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias) self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): x = F.reshape(x, (-1, x.shape[1])) gate_x = self.ih(x) gate_h = self.hh(hidden) i_r, i_i, i_n = F.split(gate_x, 3, axis=1) h_r, h_i, h_n = F.split(gate_h, 3, axis=1) resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + (resetgate * h_n)) hy = newgate + inputgate * (hidden - newgate) return hy class GRU(M.Module): """ An implementation of GRUModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias)) for l in range(1, self.num_layers): self.rnn_cell_list.append( GRUCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx outs = [] hidden = list() for layer in range(self.num_layers): hidden.append(h0[layer, :, :]) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: if self.batch_first: hidden_l = self.rnn_cell_list[layer]( input[:, t, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( input[t, :, :], hidden[layer] ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1], hidden[layer] ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = F.dropout(hidden_l, self.dropout) hidden[layer] = hidden_l outs.append(hidden_l) if self.batch_first: output = F.stack(outs, axis=1) else: output = F.stack(outs, axis=0) return output # ================================= LSTM Implementation ========================================================== class LSTMCell(M.Module): """ An implementation of LSTMCell. 
""" def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias) self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): M.init.uniform_(w, -std, std) def forward(self, x, hidden): hx, cx = hidden x = F.reshape(x, (-1, x.shape[1])) gates = self.x2h(x) + self.h2h(hx) ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1) ingate = F.sigmoid(ingate) forgetgate = F.sigmoid(forgetgate) cellgate = F.tanh(cellgate) outgate = F.sigmoid(outgate) cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate) hy = F.mul(outgate, F.tanh(cy)) return (hy, cy) class LSTM(M.Module): """ An implementation of LSTMModule. """ def __init__( self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.batch_first = batch_first self.dropout = dropout self.rnn_cell_list = [] self.rnn_cell_list.append( LSTMCell(self.input_size, self.hidden_size, self.bias) ) for l in range(1, self.num_layers): self.rnn_cell_list.append( LSTMCell(self.hidden_size, self.hidden_size, self.bias) ) def forward(self, input, hx=None): if hx is None: batch = input.shape[0] if self.batch_first else input.shape[1] h0 = F.zeros((self.num_layers, batch, self.hidden_size)) c0 = F.zeros((self.num_layers, batch, self.hidden_size)) else: h0 = hx[0] c0 = hx[1] outs = [] hidden = list() for layer in range(self.num_layers): hidden.append((h0[layer, :, :], c0[layer, :, :])) length = input.shape[1] if self.batch_first else input.shape[0] for t in range(length): for layer in range(self.num_layers): if layer == 0: inp = input[:, t, :] if self.batch_first else input[t, :, :] hidden_l = self.rnn_cell_list[layer]( inp, (hidden[layer][0], hidden[layer][1]) ) else: hidden_l = self.rnn_cell_list[layer]( hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1]) ) if self.dropout and (layer is not self.num_layers - 1): hidden_l = ( F.dropout(hidden_l[0], self.dropout),
F.dropout(hidden_l[1], self.dropout)
megengine.functional.dropout
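Annotation: dropout is applied to the output of every layer except the last (the `layer is not self.num_layers - 1` guard; `!=` would be the safer comparison than identity `is not` on integers), and for the LSTM it is applied to the h and c halves of the state separately, as the tuple above shows. Minimal `F.dropout` sketch:

import numpy as np
import megengine as mge
import megengine.functional as F

h = mge.tensor(np.random.randn(4, 8).astype("float32"))
h_dropped = F.dropout(h, 0.5)  # zeroes entries with probability 0.5 (training mode)
print(h_dropped.shape)         # (4, 8)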
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 =
M.Conv2d(input_dim, hidden_dim, 3, padding=1)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 =
M.Conv2d(hidden_dim, 2, 3, padding=1)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu =
M.ReLU()
megengine.module.ReLU
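Annotation: `FlowHead` is a two-layer conv head mapping a hidden feature map to a 2-channel flow field. A usage sketch, assuming the `FlowHead` class from the rows above is in scope, with a made-up spatial size:

import numpy as np
import megengine as mge

head = FlowHead(input_dim=128, hidden_dim=256)  # class defined in the rows above
x = mge.tensor(np.random.randn(1, 128, 32, 32).astype("float32"))
print(head(x).shape)  # (1, 2, 32, 32): per-pixel (dx, dy) flow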
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx =
F.concat([h, x], axis=1)
megengine.functional.concat
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx =
F.concat([h, x], axis=1)
megengine.functional.concat
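Annotation: `SepConvGRU` concatenates hidden state and input along the channel axis before each gate convolution, then runs the GRU update twice: once with 1x5 kernels (horizontal) and once with 5x1 kernels (vertical), factorizing a 5x5 receptive field. Channel-concat sketch with the module's default sizes:

import numpy as np
import megengine as mge
import megengine.functional as F

h = mge.tensor(np.random.randn(1, 128, 32, 32).astype("float32"))        # hidden
x = mge.tensor(np.random.randn(1, 192 + 128, 32, 32).astype("float32"))  # input
hx = F.concat([h, x], axis=1)
print(hx.shape)  # (1, 448, 32, 32): matches the hidden_dim + input_dim gate convs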
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 =
M.Conv2d(cor_planes, 256, 1, padding=0)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 =
M.Conv2d(256, 192, 3, padding=1)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = M.Conv2d(256, 192, 3, padding=1) self.convf1 =
M.Conv2d(2, 128, 7, padding=3)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = M.Conv2d(256, 192, 3, padding=1) self.convf1 = M.Conv2d(2, 128, 7, padding=3) self.convf2 =
M.Conv2d(128, 64, 3, padding=1)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = M.Conv2d(256, 192, 3, padding=1) self.convf1 = M.Conv2d(2, 128, 7, padding=3) self.convf2 = M.Conv2d(128, 64, 3, padding=1) self.conv =
M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
megengine.module.Conv2d
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = M.Conv2d(256, 192, 3, padding=1) self.convf1 = M.Conv2d(2, 128, 7, padding=3) self.convf2 = M.Conv2d(128, 64, 3, padding=1) self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1) def forward(self, flow, corr): cor = F.relu(self.convc1(corr)) cor = F.relu(self.convc2(cor)) flo = F.relu(self.convf1(flow)) flo = F.relu(self.convf2(flo)) cor_flo =
F.concat([cor, flo], axis=1)
megengine.functional.concat
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = M.Conv2d(256, 192, 3, padding=1) self.convf1 = M.Conv2d(2, 128, 7, padding=3) self.convf2 = M.Conv2d(128, 64, 3, padding=1) self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1) def forward(self, flow, corr): cor = F.relu(self.convc1(corr)) cor = F.relu(self.convc2(cor)) flo = F.relu(self.convf1(flow)) flo = F.relu(self.convf2(flo)) cor_flo = F.concat([cor, flo], axis=1) out = F.relu(self.conv(cor_flo)) return
F.concat([out, flow], axis=1)
megengine.functional.concat
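Annotation: the encoder's channel arithmetic is deliberate: the correlation branch ends at 192 channels and the flow branch at 64, their concat (256) is compressed to `128 - 2 = 126`, and appending the raw 2-channel flow yields exactly 128 motion features. A shape check, assuming `BasicMotionEncoder` from the rows above is in scope and a RAFT-style `cor_planes = 4 * (2*4 + 1)**2 = 324` (an assumption, not fixed by these rows):

import numpy as np
import megengine as mge

enc = BasicMotionEncoder(cor_planes=324)  # cor_planes value is assumed
flow = mge.tensor(np.random.randn(1, 2, 32, 32).astype("float32"))
corr = mge.tensor(np.random.randn(1, 324, 32, 32).astype("float32"))
print(enc(flow, corr).shape)  # (1, 128, 32, 32): 126 features + 2 raw flow channels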
import megengine.module as M import megengine.functional as F class FlowHead(M.Module): def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = M.ReLU() def forward(self, x): return self.conv2(self.relu(self.conv1(x))) class SepConvGRU(M.Module): def __init__(self, hidden_dim=128, input_dim=192 + 128): super(SepConvGRU, self).__init__() self.convz1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convr1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convq1 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2) ) self.convz2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convr2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) self.convq2 = M.Conv2d( hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0) ) def forward(self, h, x): # horizontal hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz1(hx)) r = F.sigmoid(self.convr1(hx)) q = F.tanh(self.convq1(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q # vertical hx = F.concat([h, x], axis=1) z = F.sigmoid(self.convz2(hx)) r = F.sigmoid(self.convr2(hx)) q = F.tanh(self.convq2(F.concat([r * h, x], axis=1))) h = (1 - z) * h + z * q return h class BasicMotionEncoder(M.Module): def __init__(self, cor_planes): super(BasicMotionEncoder, self).__init__() self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = M.Conv2d(256, 192, 3, padding=1) self.convf1 = M.Conv2d(2, 128, 7, padding=3) self.convf2 = M.Conv2d(128, 64, 3, padding=1) self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1) def forward(self, flow, corr): cor = F.relu(self.convc1(corr)) cor = F.relu(self.convc2(cor)) flo = F.relu(self.convf1(flow)) flo = F.relu(self.convf2(flo)) cor_flo = F.concat([cor, flo], axis=1) out = F.relu(self.conv(cor_flo)) return F.concat([out, flow], axis=1) class BasicUpdateBlock(M.Module): def __init__(self, hidden_dim, cor_planes, mask_size=8): super(BasicUpdateBlock, self).__init__() self.encoder = BasicMotionEncoder(cor_planes) self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim) self.flow_head = FlowHead(hidden_dim, hidden_dim=256) self.mask = M.Sequential( M.Conv2d(128, 256, 3, padding=1), M.ReLU(), M.Conv2d(256, mask_size**2 * 9, 1, padding=0), ) def forward(self, net, inp, corr, flow, upsample=True): motion_features = self.encoder(flow, corr) inp =
F.concat([inp, motion_features], axis=1)
megengine.functional.concat
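In this row BasicMotionEncoder is complete, so it can be exercised directly. A hedged sketch; cor_planes=324 is an assumption (4 correlation pyramid levels with radius 3, as in RAFT), not a value fixed by this row:

import megengine.functional as F

enc = BasicMotionEncoder(cor_planes=324)  # 324 = 4 * (2 * 3 + 1) ** 2, assumed
flow = F.zeros((1, 2, 8, 8))
corr = F.zeros((1, 324, 8, 8))
feat = enc(flow, corr)
print(feat.shape)  # (1, 128, 8, 8): 126 learned channels + 2 raw flow channels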
import megengine.module as M
import megengine.functional as F


class FlowHead(M.Module):
    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = M.ReLU()

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))


class SepConvGRU(M.Module):
    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        self.convz1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convz2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))

    def forward(self, h, x):
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))
        r = F.sigmoid(self.convr1(hx))
        q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        # vertical
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz2(hx))
        r = F.sigmoid(self.convr2(hx))
        q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        return h


class BasicMotionEncoder(M.Module):
    def __init__(self, cor_planes):
        super(BasicMotionEncoder, self).__init__()
        self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = M.Conv2d(256, 192, 3, padding=1)
        self.convf1 = M.Conv2d(2, 128, 7, padding=3)
        self.convf2 = M.Conv2d(128, 64, 3, padding=1)
        self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)

    def forward(self, flow, corr):
        cor = F.relu(self.convc1(corr))
        cor = F.relu(self.convc2(cor))
        flo = F.relu(self.convf1(flow))
        flo = F.relu(self.convf2(flo))
        cor_flo = F.concat([cor, flo], axis=1)
        out = F.relu(self.conv(cor_flo))
        return F.concat([out, flow], axis=1)


class BasicUpdateBlock(M.Module):
    def __init__(self, hidden_dim, cor_planes, mask_size=8):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder(cor_planes)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1)
megengine.module.Conv2d
import megengine.module as M
import megengine.functional as F


class FlowHead(M.Module):
    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = M.ReLU()

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))


class SepConvGRU(M.Module):
    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        self.convz1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convz2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))

    def forward(self, h, x):
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))
        r = F.sigmoid(self.convr1(hx))
        q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        # vertical
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz2(hx))
        r = F.sigmoid(self.convr2(hx))
        q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        return h


class BasicMotionEncoder(M.Module):
    def __init__(self, cor_planes):
        super(BasicMotionEncoder, self).__init__()
        self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = M.Conv2d(256, 192, 3, padding=1)
        self.convf1 = M.Conv2d(2, 128, 7, padding=3)
        self.convf2 = M.Conv2d(128, 64, 3, padding=1)
        self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)

    def forward(self, flow, corr):
        cor = F.relu(self.convc1(corr))
        cor = F.relu(self.convc2(cor))
        flo = F.relu(self.convf1(flow))
        flo = F.relu(self.convf2(flo))
        cor_flo = F.concat([cor, flo], axis=1)
        out = F.relu(self.conv(cor_flo))
        return F.concat([out, flow], axis=1)


class BasicUpdateBlock(M.Module):
    def __init__(self, hidden_dim, cor_planes, mask_size=8):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder(cor_planes)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = M.Sequential(
            M.Conv2d(128, 256, 3, padding=1),
M.ReLU()
megengine.module.ReLU
import megengine.module as M
import megengine.functional as F


class FlowHead(M.Module):
    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = M.ReLU()

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))


class SepConvGRU(M.Module):
    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        self.convz1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convz2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))

    def forward(self, h, x):
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))
        r = F.sigmoid(self.convr1(hx))
        q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        # vertical
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz2(hx))
        r = F.sigmoid(self.convr2(hx))
        q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        return h


class BasicMotionEncoder(M.Module):
    def __init__(self, cor_planes):
        super(BasicMotionEncoder, self).__init__()
        self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = M.Conv2d(256, 192, 3, padding=1)
        self.convf1 = M.Conv2d(2, 128, 7, padding=3)
        self.convf2 = M.Conv2d(128, 64, 3, padding=1)
        self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)

    def forward(self, flow, corr):
        cor = F.relu(self.convc1(corr))
        cor = F.relu(self.convc2(cor))
        flo = F.relu(self.convf1(flow))
        flo = F.relu(self.convf2(flo))
        cor_flo = F.concat([cor, flo], axis=1)
        out = F.relu(self.conv(cor_flo))
        return F.concat([out, flow], axis=1)


class BasicUpdateBlock(M.Module):
    def __init__(self, hidden_dim, cor_planes, mask_size=8):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder(cor_planes)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = M.Sequential(
            M.Conv2d(128, 256, 3, padding=1),
            M.ReLU(),
M.Conv2d(256, mask_size**2 * 9, 1, padding=0)
megengine.module.Conv2d
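The mask head's output width is mask_size ** 2 * 9 = 8 * 8 * 9 = 576, which matches RAFT-style convex upsampling: nine combination weights for every pixel of each 8x8 upsampled block (the pairing with convex upsampling is an assumption here, not stated in the rows). A standalone sketch of the head:

import megengine.module as M
import megengine.functional as F

mask_size = 8
mask_head = M.Sequential(
    M.Conv2d(128, 256, 3, padding=1),
    M.ReLU(),
    M.Conv2d(256, mask_size ** 2 * 9, 1, padding=0),  # 576 output channels
)
x = F.zeros((1, 128, 16, 16))
print(mask_head(x).shape)  # (1, 576, 16, 16)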
import megengine.module as M
import megengine.functional as F


class FlowHead(M.Module):
    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = M.ReLU()

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))


class SepConvGRU(M.Module):
    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        self.convz1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convz2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))

    def forward(self, h, x):
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))
        r = F.sigmoid(self.convr1(hx))
        q = F.tanh(self.convq1(
F.concat([r * h, x], axis=1)
megengine.functional.concat
import megengine.module as M
import megengine.functional as F


class FlowHead(M.Module):
    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = M.ReLU()

    def forward(self, x):
        return self.conv2(self.relu(self.conv1(x)))


class SepConvGRU(M.Module):
    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        self.convz1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))
        self.convz2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = M.Conv2d(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))

    def forward(self, h, x):
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))
        r = F.sigmoid(self.convr1(hx))
        q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q

        # vertical
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz2(hx))
        r = F.sigmoid(self.convr2(hx))
        q = F.tanh(self.convq2(
F.concat([r * h, x], axis=1)
megengine.functional.concat
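These two completions fill in the candidate-state convolutions of a GRU whose 5x5 neighborhood is factored into a 1x5 (horizontal) pass followed by a 5x1 (vertical) pass. A toy check that one update keeps the hidden state's shape, assuming the complete SepConvGRU definition from the earlier rows:

import megengine.functional as F

gru = SepConvGRU(hidden_dim=128, input_dim=192 + 128)
h = F.zeros((1, 128, 8, 8))   # hidden state
x = F.zeros((1, 320, 8, 8))   # motion features + context input
h_next = gru(h, x)
print(h_next.shape)  # (1, 128, 8, 8): gates z and r blend old and candidate states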
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine


def test(args):
    valid_dataset = SIDDValData(args.data)
    valid_sampler = data.SequentialSampler(valid_dataset, batch_size=1, drop_last=False)
    valid_dataloader = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        num_workers=8,
    )
    model = UNetD(3)
    with open(args.checkpoint, "rb") as f:
        state = pickle.load(f)
    model.load_state_dict(state["state_dict"])
    model.eval()

    def valid_step(image, label):
        pred = model(image)
        pred = image - pred
        psnr_it = batch_PSNR(pred, label)
        return psnr_it

    def valid(func, data_queue):
        psnr_v = 0.
        for step, (image, label) in tqdm(enumerate(data_queue)):
            image =
megengine.tensor(image)
megengine.tensor
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine


def test(args):
    valid_dataset = SIDDValData(args.data)
    valid_sampler = data.SequentialSampler(valid_dataset, batch_size=1, drop_last=False)
    valid_dataloader = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        num_workers=8,
    )
    model = UNetD(3)
    with open(args.checkpoint, "rb") as f:
        state = pickle.load(f)
    model.load_state_dict(state["state_dict"])
    model.eval()

    def valid_step(image, label):
        pred = model(image)
        pred = image - pred
        psnr_it = batch_PSNR(pred, label)
        return psnr_it

    def valid(func, data_queue):
        psnr_v = 0.
        for step, (image, label) in tqdm(enumerate(data_queue)):
            image = megengine.tensor(image)
            label =
megengine.tensor(label)
megengine.tensor
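Both completions convert the NumPy batches produced by the DataLoader into MegEngine tensors before the forward pass. A hedged sketch of how the enclosing valid helper plausibly finishes; the PSNR accumulation below is an assumption, not part of these rows:

import megengine

def valid(func, data_queue):
    # data_queue yields (image, label) numpy pairs; func is valid_step
    psnr_v = 0.
    count = 0
    for step, (image, label) in enumerate(data_queue):
        image = megengine.tensor(image)  # numpy -> MegEngine tensor
        label = megengine.tensor(label)
        psnr_v += func(image, label)
        count += 1
    return psnr_v / max(count, 1)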
import os
import numpy as np
import collections.abc
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator


class AsrDataset(Dataset):
    def __init__(self, data_set="train"):
        """
        Args:
            root_dir (string): Directory with all the spectrograms.
        """
        self.metas = self.load_metas(hp.dataset_root, data_set)

    def load_metas(self, root, data_set):  # fix a bug
        metas = []
        with open(os.path.join(root, f"{data_set}.txt")) as f:
            for line in f.readlines():
                info = line.split("|")
                metas.append(
                    {
                        "mel_path": os.path.join(root, info[0]),
                        "frames": info[1],
                        "token_ids_str": info[2],
                        "speaker": info[3],
                    }
                )
        return metas

    def __len__(self):
        return len(self.metas)

    def __getitem__(self, idx):
        meta = self.metas[idx]
        token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
        text = np.array(token_ids, dtype=np.int32)
        mel = np.load(meta["mel_path"])
        text_input = text[:-1]
        text_output = text[1:]
        text_length = text_input.shape[0]
        pos_text = np.arange(1, text_length + 1)
        pos_mel = np.arange(1, mel.shape[0] + 1)
        return {
            "text": text,
            "text_input": text_input,
            "text_output": text_output,
            "text_length": text_length,
            "mel": mel,
            "pos_mel": pos_mel,
            "pos_text": pos_text,
        }


class AsrCollator(Collator):
    def __init__(self, pad_value: float = 0.0):
        super().__init__()
        self.pad_value = pad_value

    def apply(self, batch):
        # Puts each data field into a tensor with outer dimension batch size
        if isinstance(batch[0], collections.abc.Mapping):
            text = [d["text"] for d in batch]
            text_input = [d["text_input"] for d in batch]
            text_output = [d["text_output"] for d in batch]
            text_length = [d["text_length"] for d in batch]
            mel = [d["mel"] for d in batch]
            mel_length = [d["mel"].shape[0] for d in batch]
            pos_mel = [d["pos_mel"] for d in batch]
            pos_text = [d["pos_text"] for d in batch]

            # sort every field by decreasing mel length
            text = [i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)]
            text_input = [i for i, _ in sorted(zip(text_input, mel_length), key=lambda x: x[1], reverse=True)]
            text_output = [i for i, _ in sorted(zip(text_output, mel_length), key=lambda x: x[1], reverse=True)]
            text_length = [i for i, _ in sorted(zip(text_length, mel_length), key=lambda x: x[1], reverse=True)]
            mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
            pos_text = [i for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)]
            pos_mel = [i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)]
            mel_length = sorted(mel_length, reverse=True)

            # PAD sequences with largest length of the batch
            text_input = _prepare_data(text_input).astype(np.int32)
            text_output = _prepare_data(text_output).astype(np.int32)
            mel = _pad_mel(mel)
            pos_mel = _prepare_data(pos_mel).astype(np.int32)
            pos_text = _prepare_data(pos_text).astype(np.int32)

            return (
                mge.Tensor(text_input),
                mge.Tensor(text_output),
                mge.Tensor(mel),
                mge.Tensor(pos_text),
                mge.Tensor(pos_mel),
                mge.Tensor(text_length),
                mge.Tensor(mel_length),
            )

        raise TypeError(
            "batch must contain tensors, numbers, dicts or lists; found {}".format(
                type(batch[0])
            )
        )


def collate_fn_transformer_test(batch):
    # Puts each data field into a tensor with outer dimension batch size
    text = [batch["text"]]
    text_input = batch["text_input"]
    text_output = batch["text_output"]
    text_length = batch["text_length"]
    mel = [batch["mel"]]
    mel_length = [batch["mel"].shape[1]]
    pos_mel = batch["pos_mel"]
    pos_text = batch["pos_text"]

    text = [i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)]
    text_input = [i for i, _ in sorted(zip(text_input, mel_length), key=lambda x: x[1], reverse=True)]
    text_output = [i for i, _ in sorted(zip(text_output, mel_length), key=lambda x: x[1], reverse=True)]
    text_length = [i for i, _ in sorted(zip(text_length, mel_length), key=lambda x: x[1], reverse=True)]
    mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
    pos_text = [i for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)]
    pos_mel = [i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)]
    mel_length = sorted(mel_length, reverse=True)

    # PAD sequences with largest length of the batch
    text_input = _prepare_data(text_input).astype(np.int32)
    text_output = _prepare_data(text_output).astype(np.int32)
    mel = _pad_mel(mel[0])
    pos_mel = _prepare_data(pos_mel).astype(np.int32)
    pos_text = _prepare_data(pos_text).astype(np.int32)

    return (
mge.Tensor(text_input)
megengine.Tensor
import os
import numpy as np
import collections.abc
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator


class AsrDataset(Dataset):
    def __init__(self, data_set="train"):
        """
        Args:
            root_dir (string): Directory with all the spectrograms.
        """
        self.metas = self.load_metas(hp.dataset_root, data_set)

    def load_metas(self, root, data_set):  # fix a bug
        metas = []
        with open(os.path.join(root, f"{data_set}.txt")) as f:
            for line in f.readlines():
                info = line.split("|")
                metas.append(
                    {
                        "mel_path": os.path.join(root, info[0]),
                        "frames": info[1],
                        "token_ids_str": info[2],
                        "speaker": info[3],
                    }
                )
        return metas

    def __len__(self):
        return len(self.metas)

    def __getitem__(self, idx):
        meta = self.metas[idx]
        token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
        text = np.array(token_ids, dtype=np.int32)
        mel = np.load(meta["mel_path"])
        text_input = text[:-1]
        text_output = text[1:]
        text_length = text_input.shape[0]
        pos_text = np.arange(1, text_length + 1)
        pos_mel = np.arange(1, mel.shape[0] + 1)
        return {
            "text": text,
            "text_input": text_input,
            "text_output": text_output,
            "text_length": text_length,
            "mel": mel,
            "pos_mel": pos_mel,
            "pos_text": pos_text,
        }


class AsrCollator(Collator):
    def __init__(self, pad_value: float = 0.0):
        super().__init__()
        self.pad_value = pad_value

    def apply(self, batch):
        # Puts each data field into a tensor with outer dimension batch size
        if isinstance(batch[0], collections.abc.Mapping):
            text = [d["text"] for d in batch]
            text_input = [d["text_input"] for d in batch]
            text_output = [d["text_output"] for d in batch]
            text_length = [d["text_length"] for d in batch]
            mel = [d["mel"] for d in batch]
            mel_length = [d["mel"].shape[0] for d in batch]
            pos_mel = [d["pos_mel"] for d in batch]
            pos_text = [d["pos_text"] for d in batch]

            # sort every field by decreasing mel length
            text = [i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)]
            text_input = [i for i, _ in sorted(zip(text_input, mel_length), key=lambda x: x[1], reverse=True)]
            text_output = [i for i, _ in sorted(zip(text_output, mel_length), key=lambda x: x[1], reverse=True)]
            text_length = [i for i, _ in sorted(zip(text_length, mel_length), key=lambda x: x[1], reverse=True)]
            mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
            pos_text = [i for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)]
            pos_mel = [i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)]
            mel_length = sorted(mel_length, reverse=True)

            # PAD sequences with largest length of the batch
            text_input = _prepare_data(text_input).astype(np.int32)
            text_output = _prepare_data(text_output).astype(np.int32)
            mel = _pad_mel(mel)
            pos_mel = _prepare_data(pos_mel).astype(np.int32)
            pos_text = _prepare_data(pos_text).astype(np.int32)

            return (
                mge.Tensor(text_input),
                mge.Tensor(text_output),
                mge.Tensor(mel),
                mge.Tensor(pos_text),
                mge.Tensor(pos_mel),
                mge.Tensor(text_length),
                mge.Tensor(mel_length),
            )

        raise TypeError(
            "batch must contain tensors, numbers, dicts or lists; found {}".format(
                type(batch[0])
            )
        )


def collate_fn_transformer_test(batch):
    # Puts each data field into a tensor with outer dimension batch size
    text = [batch["text"]]
    text_input = batch["text_input"]
    text_output = batch["text_output"]
    text_length = batch["text_length"]
    mel = [batch["mel"]]
    mel_length = [batch["mel"].shape[1]]
    pos_mel = batch["pos_mel"]
    pos_text = batch["pos_text"]

    text = [i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)]
    text_input = [i for i, _ in sorted(zip(text_input, mel_length), key=lambda x: x[1], reverse=True)]
    text_output = [i for i, _ in sorted(zip(text_output, mel_length), key=lambda x: x[1], reverse=True)]
    text_length = [i for i, _ in sorted(zip(text_length, mel_length), key=lambda x: x[1], reverse=True)]
    mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
    pos_text = [i for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)]
    pos_mel = [i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)]
    mel_length = sorted(mel_length, reverse=True)

    # PAD sequences with largest length of the batch
    text_input = _prepare_data(text_input).astype(np.int32)
    text_output = _prepare_data(text_output).astype(np.int32)
    mel = _pad_mel(mel[0])
    pos_mel = _prepare_data(pos_mel).astype(np.int32)
    pos_text = _prepare_data(pos_text).astype(np.int32)

    return (
        mge.Tensor(text_input),
mge.Tensor(text_output)
megengine.Tensor
import os
import numpy as np
import collections.abc
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator


class AsrDataset(Dataset):
    def __init__(self, data_set="train"):
        """
        Args:
            root_dir (string): Directory with all the spectrograms.
        """
        self.metas = self.load_metas(hp.dataset_root, data_set)

    def load_metas(self, root, data_set):  # fix a bug
        metas = []
        with open(os.path.join(root, f"{data_set}.txt")) as f:
            for line in f.readlines():
                info = line.split("|")
                metas.append(
                    {
                        "mel_path": os.path.join(root, info[0]),
                        "frames": info[1],
                        "token_ids_str": info[2],
                        "speaker": info[3],
                    }
                )
        return metas

    def __len__(self):
        return len(self.metas)

    def __getitem__(self, idx):
        meta = self.metas[idx]
        token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
        text = np.array(token_ids, dtype=np.int32)
        mel = np.load(meta["mel_path"])
        text_input = text[:-1]
        text_output = text[1:]
        text_length = text_input.shape[0]
        pos_text = np.arange(1, text_length + 1)
        pos_mel = np.arange(1, mel.shape[0] + 1)
        return {
            "text": text,
            "text_input": text_input,
            "text_output": text_output,
            "text_length": text_length,
            "mel": mel,
            "pos_mel": pos_mel,
            "pos_text": pos_text,
        }


class AsrCollator(Collator):
    def __init__(self, pad_value: float = 0.0):
        super().__init__()
        self.pad_value = pad_value

    def apply(self, batch):
        # Puts each data field into a tensor with outer dimension batch size
        if isinstance(batch[0], collections.abc.Mapping):
            text = [d["text"] for d in batch]
            text_input = [d["text_input"] for d in batch]
            text_output = [d["text_output"] for d in batch]
            text_length = [d["text_length"] for d in batch]
            mel = [d["mel"] for d in batch]
            mel_length = [d["mel"].shape[0] for d in batch]
            pos_mel = [d["pos_mel"] for d in batch]
            pos_text = [d["pos_text"] for d in batch]

            # sort every field by decreasing mel length
            text = [i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)]
            text_input = [i for i, _ in sorted(zip(text_input, mel_length), key=lambda x: x[1], reverse=True)]
            text_output = [i for i, _ in sorted(zip(text_output, mel_length), key=lambda x: x[1], reverse=True)]
            text_length = [i for i, _ in sorted(zip(text_length, mel_length), key=lambda x: x[1], reverse=True)]
            mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
            pos_text = [i for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)]
            pos_mel = [i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)]
            mel_length = sorted(mel_length, reverse=True)

            # PAD sequences with largest length of the batch
            text_input = _prepare_data(text_input).astype(np.int32)
            text_output = _prepare_data(text_output).astype(np.int32)
            mel = _pad_mel(mel)
            pos_mel = _prepare_data(pos_mel).astype(np.int32)
            pos_text = _prepare_data(pos_text).astype(np.int32)

            return (
                mge.Tensor(text_input),
                mge.Tensor(text_output),
                mge.Tensor(mel),
                mge.Tensor(pos_text),
                mge.Tensor(pos_mel),
                mge.Tensor(text_length),
                mge.Tensor(mel_length),
            )

        raise TypeError(
            "batch must contain tensors, numbers, dicts or lists; found {}".format(
                type(batch[0])
            )
        )


def collate_fn_transformer_test(batch):
    # Puts each data field into a tensor with outer dimension batch size
    text = [batch["text"]]
    text_input = batch["text_input"]
    text_output = batch["text_output"]
    text_length = batch["text_length"]
    mel = [batch["mel"]]
    mel_length = [batch["mel"].shape[1]]
    pos_mel = batch["pos_mel"]
    pos_text = batch["pos_text"]

    text = [i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)]
    text_input = [i for i, _ in sorted(zip(text_input, mel_length), key=lambda x: x[1], reverse=True)]
    text_output = [i for i, _ in sorted(zip(text_output, mel_length), key=lambda x: x[1], reverse=True)]
    text_length = [i for i, _ in sorted(zip(text_length, mel_length), key=lambda x: x[1], reverse=True)]
    mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
    pos_text = [i for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)]
    pos_mel = [i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)]
    mel_length = sorted(mel_length, reverse=True)

    # PAD sequences with largest length of the batch
    text_input = _prepare_data(text_input).astype(np.int32)
    text_output = _prepare_data(text_output).astype(np.int32)
    mel = _pad_mel(mel[0])
    pos_mel = _prepare_data(pos_mel).astype(np.int32)
    pos_text = _prepare_data(pos_text).astype(np.int32)

    return (
        mge.Tensor(text_input),
        mge.Tensor(text_output),
mge.Tensor(mel)
megengine.Tensor
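_prepare_data and _pad_mel are referenced but never defined in these rows. A minimal sketch of what such helpers usually look like; the names, zero-padding scheme, and (T, n_mels) layout are assumptions carried over from common Transformer-TTS collators, not from this dataset:

import numpy as np

def _pad_data(x, length):
    # right-pad a 1-D token/position sequence with zeros
    return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=0)

def _prepare_data(inputs):
    # pad every sequence to the longest one, then stack to (B, T_max)
    max_len = max(x.shape[0] for x in inputs)
    return np.stack([_pad_data(x, max_len) for x in inputs])

def _pad_mel(inputs):
    # right-pad each (T, n_mels) spectrogram along time, stack to (B, T_max, n_mels)
    max_len = max(x.shape[0] for x in inputs)
    return np.stack(
        [np.pad(x, ((0, max_len - x.shape[0]), (0, 0)), mode="constant") for x in inputs]
    )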